save_hyperparameters() self.backbone = MyResNet(num_classes) self.train_accuracy = torchmetrics.Accuracy() def forward(self, x): embedding = self.backbone(x) return embedding def training_step(self, batch, batch
[LightningModule.CHECKPOINT_HYPER_PARAMS_TYPE]=type(model.hparams)else:checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]=dict(model.hparams)# give the model a chance to add a few thingsmodel.on_save_checkpoint(checkpoint)ifself.trainer.datamoduleisnotNone:self.trainer.datamodule.on_save_...
sshleifer Jun 8, 2020 • edited Tried that; I got a better traceback, but still no solution: KeyError: 'Trying to restore training state but checkpoint contains only the model. This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`.' ...