get_target_weights(target, net_output) 2 changes: 1 addition & 1 deletion 2 fairseq/models/wav2vec.py Original file line numberDiff line numberDiff line change @@ -200,7 +200,7 @@ def get_logits(self, net_output): logits = net_output['cpc_logits'] return logits def get_targets(...
rnn.pack_padded_sequence(x, src_lengths, batch_first=True) # Get the output from the LSTM. _outputs, (final_hidden, _final_cell) = self.lstm(x) # Return the Encoder's output. This can be any object and will be # passed directly to the Decoder. return { # this will ...
get_targets(sample, net_output).view(-1) loss = F.nll_loss(lprobs, target, size_average=False, ignore_index=self.padding_idx, reduce=reduce) return loss, loss @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" 4 ...
通过torch.cuda.get_device_capability(0)[0]可以确定GPU是否支持半精度(值小于7则不支持,大于等于7则支持。)# --no-epoch-checkpoints: 只储存最后和最好的检查点# --save-dir: 训练过程中保存中间模型,默认为checkpoints。# --label-smoothing 0.1:将label_smoothed_cross_entropy损失默认为0的label-smoothing值...
def create_data(source_sents, target_sents): de2idx, idx2de = load_de_vocab() en2idx, idx2en = load_en_vocab() # Index x_list, y_list, Sources, Targets = [], [], [], [] for source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1) ...
其实发现translation task没有什么东西,全是一些如何加载预训练模型,以及如何加载数据,如何将数据处理成翻译需要的形式,因为主要是继承fairseq_task的功能,而fairseq_task本身就是一个seq2seq,因此只用translation.py实现加载数据什么的就行了。 fairseq_task.py ...
.input_vocab,tgt=labels,tgt_sizes=torch.ones(len(labels)),# targets have length 1tgt_dict=self.label_vocab,left_pad_source=False,max_source_positions=self.args.max_positions,max_target_positions=1,# Since our target is a single class label, there's no need for# input feeding. If we ...
Fine-tuning on 100h of Librispeech with letter targets:$ fairseq-hydra-train \ distributed_training.distributed_port=$PORT \ task.data=/path/to/data \ model.w2v_path=/path/to/model.pt \ --config-dir /path/to/fairseq-py/examples/wav2vec/config/finetuning \ --config-name base_100h ...
服务 加入Gitee 与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :) 免费加入 已有帐号?立即登录 master 分支(24) 标签(14) 管理 管理 master fix_iwslt v0.10.2 hubconf2 add_hydra_dep windows repro_analyzing_uncertainty pipibjc-wmt20 ...
get_targets(sample, [logits]).view(-1) sample_size = targets.numel() if not self.args.regression_target: loss = F.nll_loss( F.log_softmax(logits, dim=-1, dtype=torch.float32), targets, reduction='sum', ) else: logits = logits.squeeze().float() targets = targets.float() loss ...