... logsumexp(logit_p, dim=1) + torch.logsumexp(logit_n, dim=1)).mean()
        return loss

Example #2
Source File: modules.py From BAMnet with Apache License 2.0, 6 votes

    def enc_ans_features(self, x_type_bow, x_types, x_type_bow_len, x_path_bow, x_paths, x_path_bow_len, x_ctx_...
        '''
        # ans_types = torch.mean(self.ent_type_embed(x_types.view(-1, x_types.size(-1))), 1).view(x_types.size(0), x_types.size(1), -1)
        ans_type_bow = (self.lstm_enc_type(x_type_bow.view(-1, x_type_bow.size(-1)), x_type_bow_len.view(-1))[1]).view(x_type_bow....
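The truncated fragment mean-pools entity-type embeddings and encodes a bag-of-words sequence with an LSTM. A minimal self-contained sketch of that pattern, assuming the module sizes and helper names not visible above (the real BAMnet encoder also uses the sequence lengths and several more feature types):

    import torch
    import torch.nn as nn

    class AnswerTypeEncoder(nn.Module):
        # Sketch: mean-pool entity-type embeddings and encode a
        # bag-of-words sequence with an LSTM, as in the fragment above.
        def __init__(self, vocab_size, num_types, embed_dim, hidden_dim):
            super().__init__()
            self.word_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
            self.ent_type_embed = nn.Embedding(num_types, embed_dim, padding_idx=0)
            self.lstm_enc_type = nn.LSTM(embed_dim, hidden_dim, batch_first=True)

        def forward(self, x_type_bow, x_types):
            # x_type_bow: (batch, cands, seq_len) word ids
            # x_types:    (batch, cands, n_types) entity-type ids
            b, c, t = x_type_bow.size()
            emb = self.word_embed(x_type_bow.view(b * c, t))
            _, (h_n, _) = self.lstm_enc_type(emb)        # final hidden state
            ans_type_bow = h_n[-1].view(b, c, -1)
            ans_types = self.ent_type_embed(x_types.view(b * c, -1)).mean(1).view(b, c, -1)
            return ans_type_bow, ans_types

    enc = AnswerTypeEncoder(vocab_size=100, num_types=20, embed_dim=16, hidden_dim=32)
    bow = torch.randint(0, 100, (2, 3, 5))
    types = torch.randint(0, 20, (2, 3, 4))
    print([t.shape for t in enc(bow, types)])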
... log_softmax(input, dim=1)
        loss += F.nll_loss(log_probs, Variable(target))
        # regularization
        if self.label_smooth > 0.:
            loss -= (log_probs.mean() + self.kld_u_const) * self.label_smooth
        return loss
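The regularizer above subtracts (log_probs.mean() + kld_u_const) * label_smooth, which adds label_smooth * KL(u || p) for the uniform distribution u over C classes, since KL(u || p) = -mean_c(log p_c) - log C. A standalone sketch under that reading (taking kld_u_const = log C is an assumption about the original code):

    import math
    import torch
    import torch.nn.functional as F

    def smoothed_nll_loss(logits, target, label_smooth=0.1):
        # Sketch: NLL loss regularized toward the uniform distribution.
        # Subtracting (mean log p + log C) * eps adds eps * KL(u || p).
        log_probs = F.log_softmax(logits, dim=1)
        loss = F.nll_loss(log_probs, target)
        if label_smooth > 0.:
            kld_u_const = math.log(logits.size(1))   # log C (assumed meaning of self.kld_u_const)
            loss = loss - (log_probs.mean() + kld_u_const) * label_smooth
        return loss

    logits = torch.randn(8, 5)
    target = torch.randint(0, 5, (8,))
    print(smoothed_nll_loss(logits, target))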
Example #4
Source File: nn_lib.py From ConvLab with MIT License, 6 votes

    def...
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])
        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.runnin...
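This fragment is the synchronized batch-norm pattern: compute per-channel mean and mean of squares locally, all-reduce both across workers in one tensor, then derive the variance as E[x^2] - E[x]^2. A minimal sketch using plain torch.distributed (the AllReduce.apply above is the source repo's autograd-aware wrapper; using dist.all_reduce directly is an assumption and skips gradient handling):

    import torch
    import torch.distributed as dist

    def sync_batch_stats(x):
        # Sketch: cross-process batch-norm statistics.
        # x: (N, C, H, W). Requires an initialized process group.
        C = x.size(1)
        mean = torch.mean(x, dim=[0, 2, 3])
        meansqr = torch.mean(x * x, dim=[0, 2, 3])
        vec = torch.cat([mean, meansqr], dim=0)
        dist.all_reduce(vec)                    # sum over all workers
        vec = vec / dist.get_world_size()       # average
        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean             # E[x^2] - E[x]^2
        return mean, var

Packing both statistics into one vector costs a single all-reduce instead of two, which is the point of the torch.cat/torch.split dance in the original.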
... h(torch.cat([y, z], -1))
        p.loss(binary_cross_entropy, self.x_mean(h), x, name='x')
        return p

The model above can be used both for conditioned forward execution and for generation. The reason is that q[k] returns None for variable names k that have not been ...
... ())
        if y_values is not None:
            y_values = y_values.expand(num_samples, *y_values.size())
        h = self.h(x)
        y = q.concrete(logits=self.y_log_weights(h), temperature=0.66, value=y_values, name='y')
        h2 = torch.cat([y, h], -1)
        z = q.normal(loc=self.z_mean(h2), scale=torch.exp(self.z_log_std(h2)), name='...
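The conditioning mechanism described above can be mimicked without the library: when the trace holds a recorded value under a name, the model reuses it (conditioned execution); when the lookup returns None, it samples fresh (generation). A minimal dictionary-based sketch of just that mechanism, with all names hypothetical:

    import torch

    class Trace(dict):
        # Sketch: a trace that returns None for unrecorded variable names,
        # mirroring the q[k]-returns-None behavior described above.
        def __missing__(self, key):
            return None

    def sample_z(trace, loc, scale):
        z = trace['z']
        if z is None:                          # generation: no recorded value
            z = loc + scale * torch.randn_like(loc)
            trace['z'] = z
        return z                               # conditioned: reuse recorded value

    loc, scale = torch.zeros(3), torch.ones(3)
    print(sample_z(Trace(), loc, scale))                        # generates
    print(sample_z(Trace(z=torch.tensor([1., 2., 3.])), loc, scale))  # reuses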
... mean((tmp - tags[-1].expand_as(tmp))**2)

        if len(tags) == 0:
            return make_input(torch.zeros([1]).float()), make_input(torch.zeros([1]).float())

        tags = torch.stack(tags)[:, 0]
        num = tags.size()[0]
        size = (num, num, tags.size()[1])
        A = tags.unsqueeze(dim=1)....
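This fragment follows the associative-embedding grouping-loss pattern: a pull term draws each person's keypoint tags toward that person's mean tag, and a push term, built from the pairwise difference tensor the unsqueeze/expand lines set up, penalizes different people's mean tags for being close. A minimal sketch of both terms; the shapes, names, and the exp(-d^2) push form are assumptions about the truncated original:

    import torch

    def ae_tag_loss(tags_per_person):
        # Sketch: associative-embedding pull/push loss.
        # tags_per_person: list of 1-D tensors, one per detected person,
        # holding the embedding tags of that person's keypoints.
        if len(tags_per_person) == 0:
            return torch.zeros(1), torch.zeros(1)

        means = torch.stack([t.mean() for t in tags_per_person])   # (num,)
        # pull: keep each person's tags close to their own mean tag
        pull = sum(((t - m) ** 2).mean() for t, m in zip(tags_per_person, means))

        num = means.size(0)
        A = means.unsqueeze(0).expand(num, num)
        diff = A - A.t()                        # pairwise mean-tag differences
        # push: penalize different people's mean tags for being similar
        push = torch.exp(-diff ** 2)
        push = (push.sum() - num) / max(num * (num - 1), 1)  # off-diagonal mean
        return pull / max(num, 1), push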
If two references do not point to the same object, then == does not hold, even when the contents of the two referenced objects are identical. Therefore, the result ...
... ()
        self.size = d_model
        # layer normalization has two learnable parameters
        self.alpha = nn.Parameter(torch.ones(self.size))
        self.bias = nn.Parameter(torch.zeros(self.size))
        self.eps = eps

    def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
            / (x.std(dim=-1,...
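For reference, a completed, self-contained version of this layer-norm module; the truncated tail presumably adds eps inside the denominator and the bias at the end, and that completion is an assumption:

    import torch
    import torch.nn as nn

    class Norm(nn.Module):
        # Sketch: layer normalization with two learnable parameters
        # (a per-feature scale alpha and a bias).
        def __init__(self, d_model, eps=1e-6):
            super().__init__()
            self.size = d_model
            self.alpha = nn.Parameter(torch.ones(self.size))
            self.bias = nn.Parameter(torch.zeros(self.size))
            self.eps = eps

        def forward(self, x):
            # normalize over the last dimension, then scale and shift
            return self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
                / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias

    x = torch.randn(2, 4, 8)
    print(Norm(8)(x).shape)  # torch.Size([2, 4, 8])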