{
"model": {
"model_path": "/cheetah/input/model/groupuser/Llama-3.2-1B-Instruct",
"model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct.git",
"model_branch": "main",
"type": "git"
},
"save_model": {
"model_repository": "git@git.dev2.aifrica.co.kr:groupuser/Llama-3.2-1B-Instruct-tensorboard_n_graph.git",
"type": "git"
},
"dataset": {
"dataset_repository": "git@git.dev2.aifrica.co.kr:groupuser/minized_parquet_datasets.git",
"dataset_path": "/cheetah/input/dataset/groupuser/minized_parquet_datasets",
"dataset_files": "*.parquet",
"pre_processing": "ZGVmIF9ub3JtYWxpemVfcm9sZShyYXdfcm9sZSk6CiAgICByb2xlID0gc3RyKHJhd19yb2xlKS5zdHJpcCgpLmxvd2VyKCkKCiAgICBpZiByb2xlIGluICgiaHVtYW4iLCAidXNlciIpOgogICAgICAgIHJldHVybiAidXNlciIKICAgIGlmIHJvbGUgaW4gKCJncHQiLCAiYXNzaXN0YW50IiwgIm1vZGVsIik6CiAgICAgICAgcmV0dXJuICJhc3Npc3RhbnQiCiAgICBpZiByb2xlID09ICJzeXN0ZW0iOgogICAgICAgIHJldHVybiAic3lzdGVtIgoKICAgICMg7JWMIOyImCDsl4bripQgcm9sZeydgCDtlZnsirUg7KSR64uoIOuMgOyLoCBhc3Npc3RhbnTroZwg67O064K07KeAIOunkOqzoCDrqoXtmZXtnogg7Iuk7YyoCiAgICByYWlzZSBWYWx1ZUVycm9yKGYiVW5zdXBwb3J0ZWQgcm9sZToge3Jhd19yb2xlfSIpCgoKZGVmIF9leHRyYWN0X3R1cm4odHVybik6CiAgICAjIOuztO2GtSBwYXJxdWV0IC0+IGRhdGFzZXRzIOyXkOyEnOuKlCBkaWN0IO2Yle2DnOuhnCDrk6TslrTsmLQKICAgIGlmIGlzaW5zdGFuY2UodHVybiwgZGljdCk6CiAgICAgICAgaWYgImZyb20iIGluIHR1cm4gYW5kICJ2YWx1ZSIgaW4gdHVybjoKICAgICAgICAgICAgcmV0dXJuIHsKICAgICAgICAgICAgICAgICJyb2xlIjogX25vcm1hbGl6ZV9yb2xlKHR1cm5bImZyb20iXSksCiAgICAgICAgICAgICAgICAiY29udGVudCI6ICIiIGlmIHR1cm5bInZhbHVlIl0gaXMgTm9uZSBlbHNlIHN0cih0dXJuWyJ2YWx1ZSJdKSwKICAgICAgICAgICAgfQogICAgICAgIGlmICJyb2xlIiBpbiB0dXJuIGFuZCAiY29udGVudCIgaW4gdHVybjoKICAgICAgICAgICAgcmV0dXJuIHsKICAgICAgICAgICAgICAgICJyb2xlIjogX25vcm1hbGl6ZV9yb2xlKHR1cm5bInJvbGUiXSksCiAgICAgICAgICAgICAgICAiY29udGVudCI6ICIiIGlmIHR1cm5bImNvbnRlbnQiXSBpcyBOb25lIGVsc2Ugc3RyKHR1cm5bImNvbnRlbnQiXSksCiAgICAgICAgICAgIH0KCiAgICAjIOyYiOyZuOyggeycvOuhnCBbcm9sZSwgY29udGVudF0g7ZiV7YOc7J28IOuVjOuPhCDsspjrpqwKICAgIGlmIGlzaW5zdGFuY2UodHVybiwgKGxpc3QsIHR1cGxlKSkgYW5kIGxlbih0dXJuKSA9PSAyOgogICAgICAgIHJldHVybiB7CiAgICAgICAgICAgICJyb2xlIjogX25vcm1hbGl6ZV9yb2xlKHR1cm5bMF0pLAogICAgICAgICAgICAiY29udGVudCI6ICIiIGlmIHR1cm5bMV0gaXMgTm9uZSBlbHNlIHN0cih0dXJuWzFdKSwKICAgICAgICB9CgogICAgcmFpc2UgVmFsdWVFcnJvcihmIlVuc3VwcG9ydGVkIGNvbnZlcnNhdGlvbiB0dXJuIGZvcm1hdDoge3R1cm59IikKCgpkZWYgZm9ybWF0dGluZ19mdW5jKGV4YW1wbGUpOgogICAgaWYgImNvbnZlcnNhdGlvbnMiIG5vdCBpbiBleGFtcGxlOgogICAgICAgIHJhaXNlIFZhbHVlRXJyb3IoZiJNaXNzaW5nICdjb252ZXJzYXRpb25zJy4gQXZhaWxhYmxlIGNvbHVtbnM6IHtsaXN0KGV4YW1wbGUua2V5cygpKX0iKQoKICAgIG1lc3NhZ2VzID0gW19leHRyYWN0X3R1cm4odHVybikgZm9yIHR1cm4gaW4gZXhhbXBsZVsiY29udmVyc2F0aW9ucyJdXQoKICAgICMg67mIIOuplOyLnOyngOuKlCDsoJzqsbAKICAgIG1lc3NhZ2VzID0gW20gZm9yIG0gaW4gbWVzc2FnZXMgaWYgbVsiY29udGVudCJdLnN0cmlwKCldCgogICAgaWYgbm90IG1lc3NhZ2VzOgogICAgICAgIHJhaXNlIFZhbHVlRXJyb3IoIk5vIHZhbGlkIG1lc3NhZ2VzIGZvdW5kIGluIGV4YW1wbGUiKQoKICAgIGZvcm1hdHRlZF90ZXh0ID0gdG9rZW5pemVyLmFwcGx5X2NoYXRfdGVtcGxhdGUoCiAgICAgICAgbWVzc2FnZXMsCiAgICAgICAgdG9rZW5pemU9RmFsc2UsCiAgICAgICAgYWRkX2dlbmVyYXRpb25fcHJvbXB0PUZhbHNlLAogICAgKQoKICAgIHJldHVybiB7InRleHQiOiBmb3JtYXR0ZWRfdGV4dH0KCgpmb3Igc3BsaXQgaW4gbGlzdChkYXRhc2V0X2RpY3Qua2V5cygpKToKICAgIGRhdGFzZXRfZGljdFtzcGxpdF0gPSBkYXRhc2V0X2RpY3Rbc3BsaXRdLm1hcCgKICAgICAgICBmb3JtYXR0aW5nX2Z1bmMsCiAgICAgICAgYmF0Y2hlZD1GYWxzZSwKICAgICAgICByZW1vdmVfY29sdW1ucz1kYXRhc2V0X2RpY3Rbc3BsaXRdLmNvbHVtbl9uYW1lcywKICAgICk=",
"dataset_branch": "main",
"type": "git"
},
"peft": {
"target_modules": "all-linear",
"lora_dropout": 0.05,
"quantization": "int4",
"merge_adapter": false,
"r": 16,
"lora_alpha": 32,
"bias": "none",
"task_type": "CAUSAL_LM",
"peft": true,
"tuner": "lora"
},
"tokenizer_parameters": {
"block_size": 1024,
"add_eos_token": true,
"max_length": 2048,
"padding": "right"
},
"train_model_parameters": {
"gradient_checkpointing": true,
"per_device_train_batch_size": 4,
"per_device_eval_batch_size": 4,
"dataset_text_field": "text",
"column_mappings": {
"rejected_text_column": null,
"prompt_text_column": "text",
"text_column": null
},
"logging_steps": -1,
"logging_strategy": "epoch",
"use_flash_attention": false,
"eval_strategy": "epoch",
"save_total_limit": 1,
"auto_find_batch_size": true,
"mixed_precision": "fp16",
"learning_rate": 3e-05,
"warmup_ratio": 0.1,
"weight_decay": 0,
"max_grad_norm": 1,
"chat_template": "None",
"max_prompt_length": 2048,
"gradient_accumulation": 4,
"max_completion_length": 2048,
"distributed_backend": "None",
"num_train_epochs": 1,
"model_ref": "",
"dpo_beta": 0.1,
"use_fsdp2": false,
"disable_gc": false,
"unsloth": false,
"do_train": true,
"do_predict": true,
"do_eval": true,
"optimizer": "adamw_torch",
"scheduler": "linear",
"batch_size": 2,
"log": "tensorboard",
"output_dir": "/cheetah/fine-tuning/output",
"seed": 42
},
"trainer": "sft",
"experiment_name": "tng-8"
}