Llama-3.2-1B-Instruct-finet.../cheetah-fine-tuning-config.json

83 lines
3.4 KiB
JSON

{
"train_model_parameters": {
"num_train_epochs": 1,
"model_ref": "",
"dpo_beta": 0.1,
"use_fsdp2": false,
"disable_gc": false,
"unsloth": false,
"do_train": true,
"do_predict": true,
"do_eval": true,
"gradient_checkpointing": true,
"per_device_train_batch_size": 4,
"per_device_eval_batch_size": 4,
"gradient_accumulation": 4,
"max_completion_length": 512,
"distributed_backend": "None",
"dataset_text_field": "text",
"column_mappings": {
"rejected_text_column": null,
"text_column": null,
"prompt_text_column": "text"
},
"logging_steps": 10,
"logging_strategy": "steps",
"use_flash_attention": false,
"eval_strategy": "epoch",
"save_total_limit": 1,
"auto_find_batch_size": true,
"mixed_precision": "fp16",
"learning_rate": 3e-05,
"warmup_ratio": 0.1,
"weight_decay": 0,
"max_grad_norm": 1,
"chat_template": "None",
"max_prompt_length": 512,
"optimizer": "adamw_torch",
"scheduler": "linear",
"batch_size": 2,
"output_dir": "/cheetah/fine-tuning/output",
"log": "tensorboard",
"seed": 42
},
"peft": {
"target_modules": "all-linear",
"lora_dropout": 0.05,
"quantization": "int4",
"merge_adapter": false,
"tuner": "lora",
"task_type": "CAUSAL_LM",
"lora_alpha": 32,
"r": 16,
"peft": true,
"bias": "none"
},
"tokenizer_parameters": {
"add_eos_token": true,
"max_length": 512,
"block_size": 512,
"padding": "right"
},
"dataset": {
"dataset_repository": "git@git.dev2.aifrica.co.kr:groupuser/FineTome-100k_obj_train.git",
"dataset_path": "/cheetah/input/dataset/groupuser/FineTome-100k_obj_train",
"dataset_files": "*.parquet",
"pre_processing": "ZGVmIGZvcm1hdHRpbmdfZnVuYyhleGFtcGxlKToKICAgICMgMS4gU2hhcmVHUFQoZnJvbS92YWx1ZSkg7ZiV7Iud7J2EIO2RnOykgCByb2xlL2NvbnRlbnQg7ZiV7Iud7Jy866GcIOuzgO2ZmAogICAgbWVzc2FnZXMgPSBbCiAgICAgICAgewogICAgICAgICAgICAicm9sZSI6ICJ1c2VyIiBpZiBtc2dbImZyb20iXSA9PSAiaHVtYW4iIGVsc2UgImFzc2lzdGFudCIsCiAgICAgICAgICAgICJjb250ZW50IjogbXNnWyJ2YWx1ZSJdCiAgICAgICAgfQogICAgICAgIGZvciBtc2cgaW4gZXhhbXBsZVsiY29udmVyc2F0aW9ucyJdCiAgICBdCiAgICAKICAgICMgMi4gTGxhbWEtMyDthqDtgazrgpjsnbTsoIDsnZgg7YWc7ZSM66a/IOyggeyaqSAo7Yq57IiYIO2GoO2BsCDsgr3snoUpCiAgICAjIHRva2VuaXplPUZhbHNl66GcIOyEpOygle2VmOyXrCDtlZnsirUg7KCEIO2FjeyKpO2KuCDtmJXtg5zroZwg67OA7ZmY7ZWp64uI64ukLgogICAgcmV0dXJuIHsidGV4dCI6IHRva2VuaXplci5hcHBseV9jaGF0X3RlbXBsYXRlKG1lc3NhZ2VzLCB0b2tlbml6ZT1GYWxzZSwgYWRkX2dlbmVyYXRpb25fcHJvbXB0PUZhbHNlKX0KCiMgZGF0YXNldF9kaWN07J2YIOuqqOuToCDsiqTtlIzrpr8odHJhaW4sIHRlc3Qp7JeQIOuMgO2VtCDrp6TtlZEg7Iuk7ZaJCmZvciBzcGxpdCBpbiBkYXRhc2V0X2RpY3Qua2V5cygpOgogICAgZGF0YXNldF9kaWN0W3NwbGl0XSA9IGRhdGFzZXRfZGljdFtzcGxpdF0ubWFwKAogICAgICAgIGZvcm1hdHRpbmdfZnVuYywgCiAgICAgICAgYmF0Y2hlZD1GYWxzZSwKICAgICAgICByZW1vdmVfY29sdW1ucz1kYXRhc2V0X2RpY3Rbc3BsaXRdLmNvbHVtbl9uYW1lcyAjIOq4sOyhtCDsu6zrn7wg7KCc6rGw7ZWY6rOgICd0ZXh0J+unjCDrgqjquYAKICAgICk=",
"dataset_branch": "main",
"type": "git"
},
"model": {
"model_path": "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct",
"model_branch": "main",
"model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct.git",
"type": "git"
},
"save_model": {
"model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct-finetuning-parquet.git",
"type": "git"
},
"experiment_name": "fine-1-2",
"trainer": "sft"
}