def forward(self, X: torch.Tensor) -> torch.Tensor:
    tab_slice = slice(0, self.tab_incoming_dim)
    text_slice = slice(
        self.tab_incoming_dim,
        self.tab_incoming_dim + self.text_incoming_dim
    )
    image_slice = slice(
        self.tab_incoming_dim + self.text_incoming_dim,
        self.tab_incoming_dim + self.text_incoming_dim + ...
  kwargs)
  File "C:\Users\user\anaconda3\envs\openmmlab\lib\site-packages\mmdeploy\apis\core\pipeline_manager.py", line 107, in __call__
    ret = func(*args, **kwargs)
  File "C:\Users\user\anaconda3\envs\openmmlab\lib\site-packages\mmdeploy\apis\pytorch2onnx.py", line 64, in torch2...
(It should be just a read-only flag that propagates the need for a grad_fn to child tensors, independent of whether the gradient should actually be retained in .grad. For retaining, see (2.).) In tensor factories like torch.tensor(), rename the argument requires_grad to retains_grad, ...
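For context, a minimal sketch of the current PyTorch behaviour whose two roles this proposal would split apart: requires_grad controls whether autograd records a grad_fn, while .grad is only kept on leaf tensors unless retain_grad() is called explicitly.

import torch

x = torch.tensor([1.0, 2.0], requires_grad=True)  # leaf: .grad is retained after backward()
y = x * 2                                          # non-leaf: has a grad_fn, but .grad stays None by default
y.retain_grad()                                    # opt in to keeping .grad on the non-leaf tensor
z = y.sum()
z.backward()
print(x.grad)  # tensor([2., 2.])
print(y.grad)  # tensor([1., 1.]) -- only populated because retain_grad() was called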
    Optional[torch.Tensor]]:
        """Contract the expanded batch back into its original size.

@@ -154,6 +157,7 @@ def _contract_batch(
        contracted_bs is the original batch size, and the batch size that the
        target_sampler_output will be contracted to.
        """
        contracted_bs = len(contracted_seq_gr...
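The docstring describes the inverse of batch expansion: outputs computed over the expanded batch are folded back onto the contracted_bs original sequences. A rough, hypothetical illustration of that regrouping (made-up names and shapes, not the function's actual implementation):

import torch

# Assume each of the contracted_bs original sequences was expanded into k + 1
# single-token sequences for scoring; contracting regroups the flat outputs
# back to the original batch size.
contracted_bs, k, vocab_size = 4, 3, 32000
expanded_probs = torch.rand(contracted_bs * (k + 1), vocab_size)  # stand-in for the expanded sampler output
contracted_probs = expanded_probs.view(contracted_bs, k + 1, vocab_size)
print(contracted_probs.shape)  # torch.Size([4, 4, 32000])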
        nn.Linear(output_units * 4, output_units),
    )

def forward(self, X: torch.Tensor) -> torch.Tensor:
    tab_slice = slice(0, self.tab_incoming_dim)
    text_slice = slice(
        self.tab_incoming_dim,
        self.tab_incoming_dim + self.text_incoming_dim
    )
    image_slice = slice(
        self.tab_incoming_dim ...
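A minimal standalone sketch of how those slices carve up a concatenated [tab | text | image] feature vector, with made-up incoming dims (the real values come from the module's configuration):

import torch

tab_incoming_dim, text_incoming_dim, image_incoming_dim = 16, 32, 64  # hypothetical dims
X = torch.randn(8, tab_incoming_dim + text_incoming_dim + image_incoming_dim)

tab_slice = slice(0, tab_incoming_dim)
text_slice = slice(tab_incoming_dim, tab_incoming_dim + text_incoming_dim)
image_slice = slice(
    tab_incoming_dim + text_incoming_dim,
    tab_incoming_dim + text_incoming_dim + image_incoming_dim,
)

print(X[:, tab_slice].shape, X[:, text_slice].shape, X[:, image_slice].shape)
# torch.Size([8, 16]) torch.Size([8, 32]) torch.Size([8, 64])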