{
"trainer": "sft",
"experiment_name": "vvgr-1",
"fine_tuning_image": "registry.aifrica.co.kr/cheetah/cheetahai-finetuning-pytorch-transformer:0.0.13-2.8.0-cuda12.8-cudnn9-devel-4.57.6",
"model": {
"model_branch": "main",
"model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct.git",
"model_path": "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct",
"type": "git"
},
"save_model": {
"model_repository": "git@git.dev2.aifrica.co.kr:groupuser/arrow-test.git",
"type": "git"
},
"dataset": {
"dataset_branch": "main",
"dataset_files": "data/*.arrow",
"pre_processing": "",
"dataset_repository": "git@git.dev2.aifrica.co.kr:groupuser/kicon-small.git",
"dataset_path": "/cheetah/input/dataset/groupuser/kicon-small",
"type": "git"
},
"tokenizer_parameters": {
"padding_side": "right",
"add_eos_token": true,
"max_length": 2048
},
"train_model_parameters": {
"per_device_train_batch_size": 2,
"per_device_eval_batch_size": 2,
"gradient_accumulation_steps": 4,
"gradient_checkpointing": true,
"output_dir": "/cheetah/fine-tuning/output",
"use_flash_attention": false,
"dataset_text_field": "text",
"max_seq_length": 2048,
"num_train_epochs": 1,
"auto_find_batch_size": false,
"learning_rate": 3e-05,
"lr_scheduler_type": "linear",
"eval_strategy": "epoch",
"save_strategy": "epoch",
"save_total_limit": 1,
"logging_strategy": "epoch",
"logging_steps": 10,
"torch_dtype": "bfloat16",
"fp16": false,
"bf16": true,
"packing": false,
"optim": "adamw_torch",
"weight_decay": 0,
"max_grad_norm": 1,
"warmup_ratio": 0.1,
"eval_steps": 500,
"save_steps": 500,
"report_to": "tensorboard",
"do_train": true,
"do_eval": true,
"seed": 42
}
}