Llama-3.2-1B-Instruct-finet.../cheetah-fine-tuning-config.json

{
  "dataset": {
    "dataset_repository": null,
    "dataset_path": "/cheetah/input/dataset/dataset-gpmwqrmr-erpmxkjl",
    "dataset_branch": null,
    "type": "volume"
  },
  "peft": {
    "lora_alpha": 32,
    "tuner": "lora",
    "r": 16,
    "target_modules": "all-linear",
    "lora_dropout": 0.05,
    "bias": "none",
    "task_type": "CAUSAL_LM",
    "quantization": "int4",
    "peft": true,
    "merge_adapter": null
  },
  "tokenizer_parameters": {
    "add_eos_token": true,
    "padding": "right",
    "max_length": 2048,
    "block_size": 1024
  },
  "train_model_parameters": {
    "num_train_epochs": 1,
    "gradient_checkpointing": true,
    "dataset_text_field": "text",
    "per_device_train_batch_size": 4,
    "per_device_eval_batch_size": 4,
    "column_mappings": {
      "prompt_text_column": "",
      "text_column": "text",
      "rejected_text_column": "text"
    },
    "logging_steps": -1,
    "logging_strategy": "epoch",
    "use_flash_attention": false,
    "evaluation_strategy": "epoch",
    "save_total_limit": 1,
    "auto_find_batch_size": true,
    "mixed_precision": "fp16",
    "learning_rate": 3e-05,
    "warmup_ratio": 0.1,
    "gradient_accumulation": 4,
    "weight_decay": 0,
    "max_grad_norm": 1,
    "chat_template": "None",
    "model_ref": "",
    "dpo_beta": 0.1,
    "max_prompt_length": null,
    "max_completion_length": null,
    "distributed_backend": "None",
    "use_fsdp2": false,
    "disable_gc": false,
    "unsloth": "false",
    "do_train": true,
    "do_predict": true,
    "optimizer": "adamw_torch",
    "batch_size": 2,
    "output_dir": "/cheetah/fine-tuning/output",
    "scheduler": "linear",
    "log": "tensorboard",
    "seed": 42
  },
  "model": {
    "model_path": "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct",
    "model_branch": "main",
    "model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct.git",
    "type": "git"
  },
  "save_model": {
    "model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct-finetuning-s3.git",
    "type": "git"
  },
  "trainer": "sft",
  "experiment_name": "exp-6"
}
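
How these blocks map onto open tooling: "trainer": "sft" and the "peft" section use the Hugging Face peft/trl vocabulary, so a rough reconstruction of the run outside the cheetah launcher might look like the sketch below. This is an assumption, not cheetah's actual launcher code: "quantization": "int4" is read as bitsandbytes 4-bit NF4 loading, the dataset is assumed to be a datasets.DatasetDict saved to disk with "train" and "test" splits, and a few argument names (eval_strategy, max_seq_length, processing_class) have drifted across transformers/TRL versions.

# Hypothetical reconstruction of this config with peft + trl.
# The cheetah launcher itself is not public; treat every mapping
# below as an assumption based on matching parameter names.
import torch
from datasets import load_from_disk
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import SFTConfig, SFTTrainer

MODEL_PATH = "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct"
DATA_PATH = "/cheetah/input/dataset/dataset-gpmwqrmr-erpmxkjl"

# "quantization": "int4" -> assumed to mean 4-bit NF4 loading via bitsandbytes.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,  # matches "mixed_precision": "fp16"
)

# "peft" block -> LoraConfig, field for field.
lora = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules="all-linear",
    bias="none",
    task_type="CAUSAL_LM",
)

# "tokenizer_parameters" block.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
tokenizer.padding_side = "right"   # "padding": "right"
tokenizer.add_eos_token = True     # "add_eos_token": true
if tokenizer.pad_token is None:    # Llama tokenizers ship without a pad token
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH, quantization_config=bnb, device_map="auto"
)

# "train_model_parameters" -> SFTConfig (a TrainingArguments subclass).
# The config carries both "batch_size": 2 and per-device sizes of 4;
# the per-device values are used here.
args = SFTConfig(
    output_dir="/cheetah/fine-tuning/output",
    num_train_epochs=1,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,      # "gradient_accumulation": 4
    auto_find_batch_size=True,
    gradient_checkpointing=True,
    learning_rate=3e-05,
    warmup_ratio=0.1,
    weight_decay=0.0,
    max_grad_norm=1.0,
    fp16=True,                          # "mixed_precision": "fp16"
    optim="adamw_torch",
    lr_scheduler_type="linear",         # "scheduler": "linear"
    logging_strategy="epoch",
    eval_strategy="epoch",              # "evaluation_strategy" on older transformers
    save_total_limit=1,
    report_to="tensorboard",            # "log": "tensorboard"
    seed=42,
    dataset_text_field="text",
    max_seq_length=1024,                # "block_size": 1024; renamed in recent TRL
)

# Assumed layout: a DatasetDict written with save_to_disk, holding
# "train" and "test" splits ("do_predict": true implies an eval split).
ds = load_from_disk(DATA_PATH)

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=ds["train"],
    eval_dataset=ds["test"],
    peft_config=lora,
    processing_class=tokenizer,  # older TRL versions take tokenizer= instead
)
trainer.train()

One design note on the config itself: "merge_adapter": null leaves the LoRA weights as a separate adapter, so the repository named in "save_model" would receive adapter files rather than a merged checkpoint; producing a standalone model would take an extra peft merge_and_unload() step after training.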