dbinc-Llama-3.2-1B-Instruct.../cheetah-fine-tuning-config.json

{
  "trainer": "sft",
  "experiment_name": "test-4",
  "fine_tuning_image": "registry.aifrica.co.kr/cheetah/cheetahai-finetuning-pytorch-transformer:0.0.14-2.8.0-cuda12.8-cudnn9-devel-4.57.6",
  "model": {
    "model_path": "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct",
    "model_branch": "main",
    "model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct.git",
    "type": "git"
  },
  "save_model": {
    "model_repository": "git@git.dev2.aifrica.co.kr:groupuser/dbinc-Llama-3.2-1B-Instruct-finetuning.git",
    "type": "git"
  },
  "dataset": {
    "dataset_path": "/cheetah/input/dataset/dataset-dvjvydwm-zokjnzjp",
    "dataset_branch": null,
    "dataset_files": "*.jsonl",
    "pre_processing": "ZnJvbSBkYXRhc2V0cyBpbXBvcnQgRGF0YXNldERpY3QKaW1wb3J0IGpzb24KCmRlZiBwcmVwcm9jZXNzKGV4YW1wbGUpOgogICAgdGFza2luZm8gPSBleGFtcGxlWyJ0YXNraW5mbyJdCiAgICBpZiBpc2luc3RhbmNlKHRhc2tpbmZvLCBzdHIpOgogICAgICAgIHRhc2tpbmZvID0ganNvbi5sb2Fkcyh0YXNraW5mbykKICAgIAogICAgc2VudGVuY2VzID0gdGFza2luZm8uZ2V0KCJzZW50ZW5jZXMiKSBvciBbXQogICAgY29udGV4dCA9ICIiLmpvaW4oc2VudGVuY2VzKS5zdHJpcCgpCiAgICBpbnB1dF90ZXh0ID0gKHRhc2tpbmZvLmdldCgiaW5wdXQiKSBvciAiIikuc3RyaXAoKQogICAgb3V0cHV0X3RleHQgPSAodGFza2luZm8uZ2V0KCJvdXRwdXQiKSBvciAiIikuc3RyaXAoKQogICAgCiAgICB1c2VyX2NvbnRlbnQgPSBmIuuLpOydjCDtjJDqsrDrrLjsnYQg7LC46rOg7ZWY7JesIOyniOusuOyXkCDri7XtlZjshLjsmpQuXG5cbntjb250ZXh0fVxuXG7sp4jrrLg6IHtpbnB1dF90ZXh0fSIKICAgIAogICAgbWVzc2FnZXMgPSBbCiAgICAgICAgeyJyb2xlIjogInVzZXIiLCAiY29udGVudCI6IHVzZXJfY29udGVudH0sCiAgICAgICAgeyJyb2xlIjogImFzc2lzdGFudCIsICJjb250ZW50Ijogb3V0cHV0X3RleHR9LAogICAgXQogIAogICAgcmV0dXJuIHsibWVzc2FnZXMiOiBtZXNzYWdlc30KCmRhdGFzZXRfZGljdCA9IERhdGFzZXREaWN0KHsKICAgIHNwbGl0OiBkYXRhc2V0X2RpY3Rbc3BsaXRdLm1hcCgKICAgICAgICBwcmVwcm9jZXNzLAogICAgICAgIHJlbW92ZV9jb2x1bW5zPWRhdGFzZXRfZGljdFtzcGxpdF0uY29sdW1uX25hbWVzLAogICAgKQogICAgZm9yIHNwbGl0IGluIGRhdGFzZXRfZGljdAp9KQ==",
    "dataset_repository": null,
    "type": "volume"
  },
  "peft": {
    "tuner": "lora",
    "r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "bias": "none",
    "task_type": "CAUSAL_LM",
    "peft": true,
    "target_modules": "all-linear",
    "merge_adapter": false
  },
  "tokenizer_parameters": {
    "max_length": 2048,
    "padding_side": "right",
    "add_eos_token": true
  },
  "train_model_parameters": {
    "torch_dtype": "bfloat16",
    "fp16": false,
    "bf16": true,
    "packing": false,
    "optim": "adamw_torch",
    "weight_decay": 0,
    "max_grad_norm": 1,
    "warmup_ratio": 0.1,
    "eval_steps": 500,
    "save_steps": 500,
    "report_to": "tensorboard",
    "do_train": true,
    "do_eval": true,
    "per_device_train_batch_size": 2,
    "per_device_eval_batch_size": 2,
    "gradient_accumulation_steps": 4,
    "gradient_checkpointing": true,
    "output_dir": "/cheetah/fine-tuning/output",
    "use_flash_attention": false,
    "dataset_text_field": "",
    "max_seq_length": 2048,
    "num_train_epochs": 1,
    "auto_find_batch_size": false,
    "learning_rate": 0.0002,
    "lr_scheduler_type": "linear",
    "eval_strategy": "epoch",
    "save_strategy": "epoch",
    "save_total_limit": 1,
    "logging_strategy": "epoch",
    "logging_steps": 10,
    "seed": 42
  }
}
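
For reference, the base64 "pre_processing" payload decodes to the Python below. It reshapes each record's "taskinfo" field into a chat-style "messages" pair for SFT; the Korean prompt template reads, in English, "Answer the question by referring to the following court ruling. ... Question: {input_text}". Note that dataset_dict is read before it is reassigned, so the fine-tuning harness presumably injects a pre-loaded datasets.DatasetDict under that name before executing this snippet.

from datasets import DatasetDict
import json

def preprocess(example):
    taskinfo = example["taskinfo"]
    if isinstance(taskinfo, str):
        taskinfo = json.loads(taskinfo)

    sentences = taskinfo.get("sentences") or []
    context = "".join(sentences).strip()
    input_text = (taskinfo.get("input") or "").strip()
    output_text = (taskinfo.get("output") or "").strip()

    user_content = f"다음 판결문을 참고하여 질문에 답하세요.\n\n{context}\n\n질문: {input_text}"

    messages = [
        {"role": "user", "content": user_content},
        {"role": "assistant", "content": output_text},
    ]

    return {"messages": messages}

dataset_dict = DatasetDict({
    split: dataset_dict[split].map(
        preprocess,
        remove_columns=dataset_dict[split].column_names,
    )
    for split in dataset_dict
})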
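
The "peft" block uses Hugging Face PEFT's standard LoRA field names. Below is a minimal sketch of the equivalent LoraConfig, assuming the Cheetah trainer forwards these keys unchanged; this illustrates the mapping, it is not Cheetah's actual wiring, and target_modules="all-linear" requires peft >= 0.8.

from peft import LoraConfig

# Hypothetical one-to-one mapping of the "peft" block above.
lora_config = LoraConfig(
    r=16,                         # LoRA rank
    lora_alpha=32,                # scaling numerator; effective scale = lora_alpha / r = 2.0
    lora_dropout=0.05,
    bias="none",                  # leave bias terms frozen
    task_type="CAUSAL_LM",
    target_modules="all-linear",  # adapt every linear layer except the output head
)

With "merge_adapter": false, the job would presumably publish the adapter weights to the save_model repository as a separate artifact rather than calling merge_and_unload() to fold them into the base weights.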
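
The "tokenizer_parameters" and "train_model_parameters" blocks line up almost one-to-one with the TRL/transformers training surface. Below is one plausible wiring, sketched against a TRL 0.12-era API (SFTConfig later renamed max_seq_length to max_length); the split names and the reuse of dataset_dict and lora_config from the sketches above are assumptions. The effective batch size works out to per_device_train_batch_size x gradient_accumulation_steps = 2 x 4 = 8 sequences per optimizer step per device.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig, SFTTrainer

MODEL_PATH = "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct"

# "tokenizer_parameters": right padding plus a trailing EOS token.
# add_eos_token is only honored by tokenizer classes that define it.
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_PATH, padding_side="right", add_eos_token=True
)

# "torch_dtype": "bfloat16"
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, torch_dtype=torch.bfloat16)

# "train_model_parameters", copied field by field.
args = SFTConfig(
    output_dir="/cheetah/fine-tuning/output",
    num_train_epochs=1,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=4,  # effective batch: 2 * 4 = 8 per device
    gradient_checkpointing=True,
    learning_rate=2e-4,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    optim="adamw_torch",
    weight_decay=0.0,
    max_grad_norm=1.0,
    bf16=True,
    packing=False,
    max_seq_length=2048,
    eval_strategy="epoch",    # makes the config's eval_steps=500 inert
    save_strategy="epoch",    # likewise save_steps=500
    save_total_limit=1,
    logging_strategy="epoch",
    logging_steps=10,         # inert given epoch-based logging
    report_to="tensorboard",
    seed=42,
)

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=dataset_dict["train"],          # split names are assumptions
    eval_dataset=dataset_dict.get("validation"),
    peft_config=lora_config,                      # from the LoRA sketch above
    processing_class=tokenizer,
)
trainer.train()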