Interfaces: ICadDrawingDataset, ICadDrawingLayers, ICadDrawingWorkspace, ICadSettings, ICadTransformations, IDataLicenseInfo, IDataLicenseManager, IMetaInfo, ISdcExporter, IUsageModeInfo, IUsageModeOption. Classes: BDConnectionWorkspaceFactoryClass, BimFileWorkspaceClass, BimFileWorkspaceFactoryClass, BimModelObjectClass, CadDrawingNameClass...
Fields: DirectoryRecordSequence, DirectoryRecordType, DischargeDate, DischargeDiagnosisCodeSequence, DischargeDiagnosisDescription, DischargeTime, DisplayCalibrationResultSequence, DisplayDeviceTypeCodeSequence, DisplayedAreaBottomRightHandCorner, DisplayedAreaBottomRightHand...
    self.remote = False
elif self.wms_server:
    assert self.wms_layers, _("Request at least one layer")
    self.reader = WMSReader(self.wms_server, self.wms_layers,
                            self.tile_size, **self.wms_options)
    basename = '-'.join(self.wms_layers)
    self.remote = False
elif self.stylefile:
    assert has_mapnik, _("Can...
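The fragment above selects a tile reader based on which source option is configured (a WMS server versus a Mapnik style file) and asserts the required inputs before constructing it. Below is a minimal, self-contained sketch of that selection pattern; the WMSReader argument order mirrors the fragment, while StubWMSReader and the TileSource wrapper are hypothetical stand-ins for illustration, not part of the original library.

# Sketch of the source-selection pattern shown above.
# StubWMSReader and TileSource are hypothetical; only the argument
# order of the reader constructor is taken from the fragment.
from typing import Optional, Sequence


class StubWMSReader:
    def __init__(self, server: str, layers: Sequence[str], tile_size: int, **options):
        self.server = server
        self.layers = layers
        self.tile_size = tile_size
        self.options = options


class TileSource:
    def __init__(self, wms_server: Optional[str] = None,
                 wms_layers: Sequence[str] = (),
                 stylefile: Optional[str] = None,
                 tile_size: int = 256, **wms_options):
        self.reader = None
        self.remote = False
        if wms_server:
            # Same guard as in the fragment: a WMS source needs at least one layer.
            assert wms_layers, "Request at least one layer"
            self.reader = StubWMSReader(wms_server, wms_layers, tile_size, **wms_options)
            self.basename = '-'.join(wms_layers)
        elif stylefile:
            # The fragment asserts that Mapnik is available before rendering
            # from a style file; local rendering is not sketched here.
            raise NotImplementedError("Mapnik rendering not sketched here")


source = TileSource(wms_server="http://example.com/wms", wms_layers=["roads", "rivers"])
print(source.basename)  # roads-rivers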
# we do not freeze the norm layers, as suggested by https://arxiv.org/abs/2103.05247
if 'norm' in name:
    continue
else:
    p.requires_grad_(False)

def forward_encoder(self, x: torch.Tensor, lead_times: torch.Tensor, variables):
    # x: `[B, T, V, H, W]` shape.
    if isinstance(vari...
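The comment above describes freezing a pretrained backbone while leaving normalization layers trainable, following https://arxiv.org/abs/2103.05247. A minimal sketch of that pattern over named_parameters() is shown below; TinyBackbone is a hypothetical stand-in for the actual encoder.

# Sketch: freeze every parameter except those of norm layers,
# matching the `if 'norm' in name: continue` branch above.
# TinyBackbone is a hypothetical stand-in for the real encoder.
import torch.nn as nn


class TinyBackbone(nn.Module):
    def __init__(self, dim: int = 16):
        super().__init__()
        self.proj = nn.Linear(dim, dim)
        self.norm = nn.LayerNorm(dim)


def freeze_except_norm(model: nn.Module) -> None:
    for name, p in model.named_parameters():
        if 'norm' in name:
            continue             # keep norm layers trainable
        p.requires_grad_(False)  # freeze everything else


model = TinyBackbone()
freeze_except_norm(model)
print([n for n, p in model.named_parameters() if p.requires_grad])
# -> ['norm.weight', 'norm.bias']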
TRAIN_DATA="data/RACE/train/middle"
VALID_DATA="data/RACE/dev/middle \
            data/RACE/dev/high"
VOCAB_FILE=bert-vocab.txt
PRETRAINED_CHECKPOINT=checkpoints/bert_345m
CHECKPOINT_PATH=checkpoints/bert_345m_race
COMMON_TASK_ARGS="--num-layers 24 \
                  --hidden-size 1024 \
                  --num-attention-heads 16...
--num-layers 12 \
--hidden-size 768 \
--num-attention-heads 12 \
--patch-dim 4 \
--seq-length 3136 \
--max-position-embeddings 3136 \
--img-h 224 \
--img-w 224 \
--mask-factor 1.0 \
--fp16 \
--train-iters 750000 \
--lr-decay-style cosine \
--micro-batch-size 4 ...