config = PEFT_TYPE_TO_CONFIG_MAPPING[PeftConfig.from_pretrained(model_id).peft_type].from_pretrained(model_id)
  File "/data/candowu/miniconda3/envs/llama/lib/python3.9/site-packages/peft/utils/config.py", line 101, in from_pretrained
...
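For context, a minimal sketch of the config-resolution step this traceback goes through; the model_id is hypothetical, and the import location of PEFT_TYPE_TO_CONFIG_MAPPING varies across peft releases (assumption based on the peft/mapping.py path seen elsewhere in this section):

from peft import PeftConfig
from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING  # export location differs by peft version (assumption)

model_id = "some-user/some-lora-adapter"  # hypothetical adapter repo id
# PeftConfig.from_pretrained reads adapter_config.json; its peft_type field
# then selects the concrete config class (e.g. LoraConfig) from the mapping,
# mirroring the call in the traceback above.
peft_type = PeftConfig.from_pretrained(model_id).peft_type
config = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type].from_pretrained(model_id)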
@@ -826,18 +831,9 @@ def get_default_train_config(
    train_mode: console_consts.TrainMode,
    peft_type: Optional[PeftType] = None,
) -> TrainConfig:
    if train_mode == console_consts.TrainMode.PostPretrain:
        model_info = DefaultPostPretrainTrainConfigMapping.get(
            model_type,
        )
    elif train...
type: meta-llama/Meta-Llama-3-70B
use_fast: true
native_amp_init_scale: 4294967296
native_amp_growth_interval: 1000
hysteresis: 2
fp32_residual_connection: false
fp16_lm_cross_entropy: false
megatron_amp_O2: true
grad_allreduce_chunk_size_mb: 125
...
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
if len(peft_config.target_modules) == 1:
    peft_config.fan_in_fan_out = True
    peft_config.enable_lora = [True, False, True]
if peft_config.inference_mode:
    peft_config.merge_weights = True
return peft_config
...
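For reference, a minimal sketch of the default target-module lookup this fragment performs; the model_type value is an assumption, and the import path of the mapping may differ by peft version:

from peft import LoraConfig
from peft.utils import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING  # location may differ by version (assumption)

model_type = "llama"  # hypothetical; normally taken from the base model's config
# The mapping supplies per-architecture defaults (e.g. ["q_proj", "v_proj"]
# for llama); an architecture missing from the mapping is a common source of
# KeyErrors at the lookup shown above.
peft_config = LoraConfig(
    target_modules=TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_type],
)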
get_peft_model(self.backbone, lora_config)
  File "/home/naren/.local/share/virtualenvs/h2o-llmstudio-IuguWLXF/lib/python3.10/site-packages/peft/mapping.py", line 120, in get_peft_model
    return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config)
  File "/home/naren...
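As context for that dispatch, a minimal sketch of calling get_peft_model with an explicit task_type; the gpt2 backbone is a small stand-in, not the model from the traceback:

from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

backbone = AutoModelForCausalLM.from_pretrained("gpt2")  # stand-in base model (assumption)
# get_peft_model dispatches on peft_config.task_type through
# MODEL_TYPE_TO_PEFT_MODEL_MAPPING, so an unset or unexpected task_type
# surfaces as a KeyError at the line shown in the traceback.
lora_config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=16)
model = get_peft_model(backbone, lora_config)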
Traceback (most recent call last):
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py", line 1128, in from_pretrained
    config_class = CONFIG_MAPPING[config_dict["model_type"]]
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site...
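A KeyError at CONFIG_MAPPING[config_dict["model_type"]] typically means the installed transformers release predates the model architecture. A minimal sketch of the resolution step; the model id is reused from the config dump above and is a gated repo:

from transformers import AutoConfig

# AutoConfig.from_pretrained reads config.json, extracts model_type, and
# resolves it through CONFIG_MAPPING; a model_type unknown to the installed
# transformers version raises the KeyError shown in the traceback.
config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-70B")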
test_deepspeed.py::TestDeepSpeedWithLauncher::test_basic_distributed_zero3_fp16 fails with the same error as stated. Please try running with:

CUDA_VISIBLE_DEVICES="0,1" RUN_SLOW="yes" ACCELERATE_USE_DEEPSPEED="yes" pytest -sv tests/deepspeed/test_deepspeed.py -k test_basic_distributed

to ...