Mikestriken

Untitled

Apr 13th, 2025
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[1], line 98
     96 if __name__ == '__main__':
     97     print("Downloading pretrained weights and starting fine-tuning...")
---> 98     train()
     99     print("\nEvaluating fine-tuned model...")
    100     test()

Cell In[1], line 67, in train()
     64 labels = labels.to(device)
     66 # Forward pass
---> 67 outputs = model(images).logits
     68 loss = criterion(outputs, labels)
     70 # Backward and optimize

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:1266, in SwinForImageClassification.forward(self, pixel_values, head_mask, labels, output_attentions, output_hidden_states, interpolate_pos_encoding, return_dict)
   1258 r"""
   1259 labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
   1260     Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
   1261     config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
   1262     `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
   1263 """
   1264 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-> 1266 outputs = self.swin(
   1267     pixel_values,
   1268     head_mask=head_mask,
   1269     output_attentions=output_attentions,
   1270     output_hidden_states=output_hidden_states,
   1271     interpolate_pos_encoding=interpolate_pos_encoding,
   1272     return_dict=return_dict,
   1273 )
   1275 pooled_output = outputs[1]
   1277 logits = self.classifier(pooled_output)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:1062, in SwinModel.forward(self, pixel_values, bool_masked_pos, head_mask, output_attentions, output_hidden_states, interpolate_pos_encoding, return_dict)
   1056 head_mask = self.get_head_mask(head_mask, len(self.config.depths))
   1058 embedding_output, input_dimensions = self.embeddings(
   1059     pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
   1060 )
-> 1062 encoder_outputs = self.encoder(
   1063     embedding_output,
   1064     input_dimensions,
   1065     head_mask=head_mask,
   1066     output_attentions=output_attentions,
   1067     output_hidden_states=output_hidden_states,
   1068     return_dict=return_dict,
   1069 )
   1071 sequence_output = encoder_outputs[0]
   1072 sequence_output = self.layernorm(sequence_output)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:881, in SwinEncoder.forward(self, hidden_states, input_dimensions, head_mask, output_attentions, output_hidden_states, output_hidden_states_before_downsampling, always_partition, return_dict)
    872     layer_outputs = self._gradient_checkpointing_func(
    873         layer_module.__call__,
    874         hidden_states,
   (...)    878         always_partition,
    879     )
    880 else:
--> 881     layer_outputs = layer_module(
    882         hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
    883     )
    885 hidden_states = layer_outputs[0]
    886 hidden_states_before_downsampling = layer_outputs[1]

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:801, in SwinStage.forward(self, hidden_states, input_dimensions, head_mask, output_attentions, always_partition)
    798 for i, layer_module in enumerate(self.blocks):
    799     layer_head_mask = head_mask[i] if head_mask is not None else None
--> 801     layer_outputs = layer_module(
    802         hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
    803     )
    805     hidden_states = layer_outputs[0]
    807     hidden_states_before_downsampling = hidden_states

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:731, in SwinLayer.forward(self, hidden_states, input_dimensions, head_mask, output_attentions, always_partition)
    726 hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
    727 attn_mask = self.get_attn_mask(
    728     height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
    729 )
--> 731 attention_outputs = self.attention(
    732     hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
    733 )
    735 attention_output = attention_outputs[0]
    737 attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:604, in SwinAttention.forward(self, hidden_states, attention_mask, head_mask, output_attentions)
    597 def forward(
    598     self,
    599     hidden_states: torch.Tensor,
   (...)    602     output_attentions: Optional[bool] = False,
    603 ) -> Tuple[torch.Tensor]:
--> 604     self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
    605     attention_output = self.output(self_outputs[0], hidden_states)
    606     outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1739, in Module._wrapped_call_impl(self, *args, **kwargs)
   1737     return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
   1738 else:
-> 1739     return self._call_impl(*args, **kwargs)

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\torch\nn\modules\module.py:1750, in Module._call_impl(self, *args, **kwargs)
   1745 # If we don't have any hooks, we want to skip the rest of the logic in
   1746 # this function, and just call forward.
   1747 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1748         or _global_backward_pre_hooks or _global_backward_hooks
   1749         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1750     return forward_call(*args, **kwargs)
   1752 result = None
   1753 called_always_called_hooks = set()

File f:\Desktop\School_Stuff\Programming\AI\.venv\Lib\site-packages\transformers\models\swin\modeling_swin.py:527, in SwinSelfAttention.forward(self, hidden_states, attention_mask, head_mask, output_attentions)
    522 relative_position_bias = relative_position_bias.view(
    523     self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
    524 )
    526 relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
--> 527 attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
    529 if attention_mask is not None:
    530     # Apply the attention mask is (precomputed for all layers in SwinModel forward() function)
    531     mask_shape = attention_mask.shape[0]

RuntimeError: The size of tensor a (16) must match the size of tensor b (49) at non-singleton dimension 3
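
The mismatch reported at modeling_swin.py:527 (tensor a of size 16 vs. tensor b of size 49 on the window dimension) is most likely an input-resolution problem rather than a bug in the library: the pretrained relative position bias covers a 7 x 7 attention window (7 * 7 = 49 positions), while the attention scores were computed over windows that collapsed to 4 x 4 (4 * 4 = 16 tokens). Swin clamps the window to the feature-map size at deeper stages, which is what typically happens when pixel_values are smaller than the 224 x 224 resolution a window-7 checkpoint expects (for example, 32 x 32 CIFAR images fed without resizing). Below is a minimal sketch of the usual fix, resizing inputs to the size the checkpoint's image processor reports; the checkpoint name, class count, and transform are assumptions, since the paste shows only the traceback and not the train() code.

# Minimal sketch of the likely fix, not the original train() script.
# Assumptions (the paste shows only the traceback): the checkpoint name,
# the number of classes, and the torchvision-style transform below.
import torch
from torchvision import transforms
from transformers import AutoImageProcessor, SwinForImageClassification

checkpoint = "microsoft/swin-tiny-patch4-window7-224"  # assumed checkpoint

# The arithmetic behind the error message:
#   pretrained relative position bias spans a 7 x 7 window -> 7 * 7 = 49  ("tensor b (49)")
#   undersized inputs shrink a deeper stage's window to 4 x 4 -> 4 * 4 = 16  ("tensor a (16)")
# so the bias cannot be broadcast onto the attention scores.

# Fix: resize and normalize images to the resolution the checkpoint expects,
# so every stage keeps full 7 x 7 windows.
processor = AutoImageProcessor.from_pretrained(checkpoint)
size = processor.size.get("height") or processor.size.get("shortest_edge", 224)  # 224 here
transform = transforms.Compose([
    transforms.Resize((size, size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=processor.image_mean, std=processor.image_std),
])
# ...pass `transform` to the dataset that feeds the DataLoader used in train().

model = SwinForImageClassification.from_pretrained(
    checkpoint,
    num_labels=10,                 # assumed class count
    ignore_mismatched_sizes=True,  # swap out the 1000-class ImageNet head
)

# Sanity check: a correctly sized batch passes through without the size error.
dummy = torch.randn(2, 3, size, size)
with torch.no_grad():
    print(model(dummy).logits.shape)  # torch.Size([2, 10])

If the smaller native resolution has to be kept instead, the alternative is to build the model from a SwinConfig whose image_size (and, if necessary, window_size) matches the data, accepting that the pretrained window-7 bias tables cannot be reused as-is.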