Compare commits
No commits in common. "refs/deployment/triton" and "main" have entirely different histories.
35 .gitattributes vendored Normal file
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
255 1/model.py
@@ -1,255 +0,0 @@
"""
[Transformer-LLM Backend Guide]

This file is a custom Python backend template for easily deploying Hugging Face `AutoModelForCausalLM`-based models on NVIDIA Triton Server.

1. Model compatibility
   - Any causal language model compatible with Hugging Face's `AutoModelForCausalLM` class is supported.
   - [Check] that the `architectures` entry in the model's `config.json` has the form `...ForCausalLM`.
   - [Check] that the model is one of those supported by AutoModelForCausalLM in the official Hugging Face documentation.
     (https://huggingface.co/docs/transformers/en/model_doc/auto#transformers.AutoModelForCausalLM.from_pretrained)

2. Tokenizer compatibility
   - Any tokenizer compatible with `AutoTokenizer` is supported; it is loaded automatically from the same path as the model.

3. Customization
   - This template is written for general-purpose use.
   - If a specific model needs special behavior or exception handling, modify this file (`model.py`) and the configuration file (`config.pbtxt`) directly.
"""

import json
import torch
import numpy as np
import triton_python_backend_utils as pb_utils
import uuid
from typing import List, Dict, Any, Union, Tuple

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    BitsAndBytesConfig,
)
from peft import PeftModel, PeftConfig


class TritonPythonModel:
    def initialize(self, args: Dict[str, str]):
        """
        Model initialization: load the config, set up the logger, load the model and tokenizer.
        """
        self.logger = pb_utils.Logger
        self.model_config = json.loads(args["model_config"])
        self.model_name = args["model_name"]

        # Load configuration parameters
        self.base_model_path = self._get_config_param("base_model_path")
        self.is_adapter_model = self._get_config_param("is_adapter_model", "false").lower() == "true"
        self.adapter_model_path = self._get_config_param("adapter_model_path")
        self.quantization = self._get_config_param("quantization")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Log the configuration
        self.logger.log_info(f"================ {self.model_name} Setup ================")
        self.logger.log_info(f"Base Model: {self.base_model_path}")
        self.logger.log_info(f"Adapter Mode: {self.is_adapter_model} ({self.adapter_model_path})")
        self.logger.log_info(f"Quantization: {self.quantization}")
        self.logger.log_info(f"Device: {self.device}")

        self._load_model_and_tokenizer()
        self.logger.log_info("Model initialized successfully.")

    def _load_model_and_tokenizer(self):
        """Load and configure the model and tokenizer."""
        # 1. Quantization settings
        bnb_config = self._get_bnb_config()

        # 2. Load the base model
        load_path = self.base_model_path
        if self.is_adapter_model:
            peft_config = PeftConfig.from_pretrained(self.adapter_model_path)
            load_path = peft_config.base_model_name_or_path

        try:
            self.model = AutoModelForCausalLM.from_pretrained(
                load_path,
                torch_dtype="auto",
                quantization_config=bnb_config,
                device_map="auto",
                local_files_only=True,
                trust_remote_code=True
            )
        except Exception as e:
            self.logger.log_error(f"Failed to load base model: {e}")
            raise e

        # 3. Attach the adapter (if needed)
        if self.is_adapter_model:
            self.model = PeftModel.from_pretrained(self.model, self.adapter_model_path)

        self.model.eval()

        # 4. Load the tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(load_path, trust_remote_code=True)

        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
            self.logger.log_info("Pad token was None. Set to EOS token.")

        self.supports_chat_template = (
            hasattr(self.tokenizer, "chat_template") and
            self.tokenizer.chat_template is not None
        )

        self.logger.log_info(f"Supports Chat Template: {self.supports_chat_template}")
        if self.supports_chat_template:
            self.logger.log_info(f"Chat Template Content:\n{self.tokenizer.chat_template}")

    def _get_bnb_config(self) -> Union[BitsAndBytesConfig, None]:
        if self.quantization == "int4":
            return BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16
            )
        elif self.quantization == "int8":
            return BitsAndBytesConfig(
                load_in_8bit=True,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=True
            )
        return None

    def execute(self, requests):
        """Main loop for handling Triton inference requests."""
        responses = []

        for request in requests:
            # [ID generation] - kept for log tracing only (not included in the response)
            request_id = request.request_id()
            if not request_id:
                request_id = str(uuid.uuid4())

            try:
                # 1. Parse the input data
                input_data, is_chat = self._parse_input(request)

                # [LOGGING] Log the input together with the request ID
                log_input_str = json.dumps(input_data, ensure_ascii=False) if isinstance(input_data, (list, dict)) else str(input_data)
                self.logger.log_info(f"\n[RID: {request_id}] >>> [{'CHAT' if is_chat else 'TEXT'}][Input]: {log_input_str}")

                # 2. Build the generation config
                gen_config = self._create_generation_config(request)

                # 3. Tokenize
                inputs = self._tokenize(input_data, is_chat)

                # 4. Run inference (generate)
                output_text = self._generate(inputs, gen_config)

                # [LOGGING] Log the output together with the request ID
                self.logger.log_info(f"\n[RID: {request_id}] <<< [Output]: {output_text}")

                # 5. Build the response
                responses.append(self._create_response(output_text, request_id))

            except Exception as e:
                self.logger.log_error(f"[RID: {request_id}] Error during execution: {e}")
                err_tensor = pb_utils.Tensor("text_output", np.array([str(e).encode('utf-8')], dtype=np.bytes_))
                responses.append(pb_utils.InferenceResponse(output_tensors=[err_tensor]))

        return responses

    def _parse_input(self, request) -> Tuple[Union[str, List[Dict]], bool]:
        input_text = self._get_input_scalar(request, "text_input")
        try:
            conversation = json.loads(input_text)
            if isinstance(conversation, list):
                return conversation, True
        except (json.JSONDecodeError, TypeError):
            pass
        return input_text, False

    def _tokenize(self, input_data, is_chat: bool):
        if self.supports_chat_template and is_chat:
            return self.tokenizer.apply_chat_template(
                input_data,
                tokenize=True,
                add_generation_prompt=True,
                return_tensors="pt",
                return_dict=True
            ).to(self.device)
        else:
            if is_chat:
                input_data = str(input_data)
            return self.tokenizer(input_data, return_tensors="pt").to(self.device)

    def _generate(self, inputs, gen_config: GenerationConfig) -> str:
        input_ids = inputs["input_ids"]
        input_len = input_ids.shape[-1]

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                generation_config=gen_config,
                pad_token_id=self.tokenizer.pad_token_id,
                eos_token_id=self.tokenizer.eos_token_id
            )

        generated_tokens = outputs[0][input_len:]
        decoded_output = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)
        return decoded_output.strip()

    def _create_generation_config(self, request) -> GenerationConfig:
        def get_param(name, default=None, cast_type=None):
            val = self._get_input_scalar(request, name, default)
            if val is not None and cast_type:
                return cast_type(val)
            return val

        return GenerationConfig(
            max_length=get_param("max_length", 1024, int),
            max_new_tokens=get_param("max_new_tokens", 256, int),
            temperature=get_param("temperature", 1.0, float),
            do_sample=get_param("do_sample", False, bool),
            top_k=get_param("top_k", 50, int),
            top_p=get_param("top_p", 1.0, float),
            repetition_penalty=get_param("repetition_penalty", 1.0, float),
        )

    def _create_response(self, output_text: str, request_id: str):
        """Convert the generated text into a Triton response object."""
        output_tensor = pb_utils.Tensor(
            "text_output",
            np.array([output_text.encode('utf-8')], dtype=np.bytes_)
        )
        return pb_utils.InferenceResponse(output_tensors=[output_tensor])

    def _get_config_param(self, key: str, default: str = None) -> str:
        params = self.model_config.get('parameters', {})
        if key in params:
            return params[key].get('string_value', default)
        return default

    def _get_input_scalar(self, request, name: str, default=None):
        tensor = pb_utils.get_input_tensor_by_name(request, name)
        if tensor is None:
            return default
        return self._np_decoder(tensor.as_numpy()[0])

    def _np_decoder(self, obj):
        if isinstance(obj, bytes):
            return obj.decode('utf-8')
        if np.issubdtype(obj, np.integer):
            return int(obj)
        if np.issubdtype(obj, np.floating):
            return round(float(obj), 3)
        if isinstance(obj, np.bool_):
            return bool(obj)
        # Fall back to the raw value for any other dtype
        return obj

    def finalize(self):
        self.logger.log_info(f"Finalizing model {self.model_name}")
        self.model = None
        self.tokenizer = None
        torch.cuda.empty_cache()
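Note on input formats: `_parse_input` above accepts `text_input` either as plain text or, when the string parses as a JSON list, as a chat-style conversation that `_tokenize` routes through the tokenizer's chat template. A minimal sketch of the two payload shapes (the prompt text is illustrative, not taken from the repository):

```python
import json

# Plain-text payload: passed to the tokenizer as a single prompt string.
plain_payload = "Write a haiku about GPUs."

# Chat payload: a JSON-encoded list of role/content messages. _parse_input
# detects the list and _tokenize applies the tokenizer's chat template with
# add_generation_prompt=True.
chat_payload = json.dumps([
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a haiku about GPUs."},
])
```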
66 README.md Normal file
@@ -0,0 +1,66 @@
---
license: apache-2.0
datasets:
- cerebras/SlimPajama-627B
- bigcode/starcoderdata
- HuggingFaceH4/ultrachat_200k
- HuggingFaceH4/ultrafeedback_binarized
language:
- en
widget:
- example_title: Fibonacci (Python)
  messages:
  - role: system
    content: You are a chatbot who can help code!
  - role: user
    content: Write me a function to calculate the first 10 digits of the fibonacci sequence in Python and print it out to the CLI.
---
<div align="center">

# TinyLlama-1.1B
</div>

https://github.com/jzhang38/TinyLlama

The TinyLlama project aims to **pretrain** a **1.1B Llama model on 3 trillion tokens**. With some proper optimization, we can achieve this within a span of "just" 90 days using 16 A100-40G GPUs 🚀🚀. The training has started on 2023-09-01.


We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama can be plugged and played in many open-source projects built upon Llama. Besides, TinyLlama is compact with only 1.1B parameters. This compactness allows it to cater to a multitude of applications demanding a restricted computation and memory footprint.

#### This Model
This is the chat model finetuned on top of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T). **We follow [HF's Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)'s training recipe.** The model was "initially fine-tuned on a variant of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT.
We then further aligned the model with [🤗 TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset, which contains 64k prompts and model completions that are ranked by GPT-4."


#### How to use
You will need transformers>=4.34.
Do check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information.

```python
# Install transformers from source - only needed for versions <= v4.34
# pip install git+https://github.com/huggingface/transformers.git
# pip install accelerate

import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
# <|system|>
# You are a friendly chatbot who always responds in the style of a pirate.</s>
# <|user|>
# How many helicopters can a human eat in one sitting?</s>
# <|assistant|>
# ...
```
26 config.json Normal file
@@ -0,0 +1,26 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 5632,
  "max_position_embeddings": 2048,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 22,
  "num_key_value_heads": 4,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.35.0",
  "use_cache": true,
  "vocab_size": 32000
}
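The backend guide in `1/model.py` asks you to confirm that the `architectures` entry of the deployed model's `config.json` ends in `...ForCausalLM`; the `LlamaForCausalLM` entry above satisfies that. A small hedged check, assuming the file path points at the `config.json` shown here:

```python
import json

# Placeholder path to wherever this config.json sits in the model repository.
with open("config.json") as f:
    cfg = json.load(f)

# "LlamaForCausalLM" ends with "ForCausalLM", so the model is compatible with
# the AutoModelForCausalLM-based template.
assert any(a.endswith("ForCausalLM") for a in cfg.get("architectures", []))
```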
131 config.pbtxt
@@ -1,131 +0,0 @@
# Triton Backend for TransformerLLM.
backend: "python"
max_batch_size: 0

# Triton should expect as input a single string
# input of variable length named 'text_input'
input [
  {
    name: "text_input"
    data_type: TYPE_STRING
    dims: [ 1 ]
  },
  {
    name: "max_length"
    data_type: TYPE_INT32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "max_new_tokens"
    data_type: TYPE_INT32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "do_sample"
    data_type: TYPE_BOOL
    dims: [ 1 ]
    optional: true
  },
  {
    name: "top_k"
    data_type: TYPE_INT32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "top_p"
    data_type: TYPE_FP32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "temperature"
    data_type: TYPE_FP32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "repetition_penalty"
    data_type: TYPE_FP32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "stream"
    data_type: TYPE_BOOL
    dims: [ 1 ]
    optional: true
  }
]

# Triton should expect to respond with a single string
# output of variable length named 'text_output'
output [
  {
    name: "text_output"
    data_type: TYPE_STRING
    dims: [ 1 ]
  }
]

parameters: [
  {
    key: "base_model_path",
    value: {string_value: "/cheetah/input/model/groupuser/TinyLlama-1.1B-Chat-v1.0"}
  },
  {
    key: "is_adapter_model",
    value: {string_value: "false"}
  },
  {
    key: "adapter_model_path",
    value: {string_value: ""}
  },
  {
    key: "quantization",
    value: {string_value: "none"}
  }
]

instance_group [
  {
    kind: KIND_AUTO
    count: 1
  }
]
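For reference, a hedged client-side sketch of how this configuration could be exercised over Triton's HTTP API with `tritonclient`; the server URL and the model name `transformer_llm` are placeholders (not taken from the repository), and only tensors declared above are used:

```python
# Minimal client sketch, assuming a Triton server at localhost:8000 and a
# placeholder model name "transformer_llm".
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

# Required input: the single string tensor "text_input".
text = np.array(["Write a haiku about GPUs.".encode("utf-8")], dtype=np.object_)
text_input = httpclient.InferInput("text_input", [1], "BYTES")
text_input.set_data_from_numpy(text)

# One of the optional generation parameters declared in config.pbtxt.
max_new_tokens = httpclient.InferInput("max_new_tokens", [1], "INT32")
max_new_tokens.set_data_from_numpy(np.array([128], dtype=np.int32))

result = client.infer(
    model_name="transformer_llm",  # placeholder
    inputs=[text_input, max_new_tokens],
    outputs=[httpclient.InferRequestedOutput("text_output")],
)
print(result.as_numpy("text_output")[0].decode("utf-8"))
```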
16 eval_results.json Normal file
@@ -0,0 +1,16 @@
{
  "epoch": 3.0,
  "eval_logits/chosen": -2.707406759262085,
  "eval_logits/rejected": -2.656524419784546,
  "eval_logps/chosen": -370.1297607421875,
  "eval_logps/rejected": -296.0738525390625,
  "eval_loss": 0.513750433921814,
  "eval_rewards/accuracies": 0.738095223903656,
  "eval_rewards/chosen": -0.02744222804903984,
  "eval_rewards/margins": 1.0087225437164307,
  "eval_rewards/rejected": -1.03616464138031,
  "eval_runtime": 93.5908,
  "eval_samples": 2000,
  "eval_samples_per_second": 21.37,
  "eval_steps_per_second": 0.673
}
7 generation_config.json Normal file
@@ -0,0 +1,7 @@
{
  "bos_token_id": 1,
  "eos_token_id": 2,
  "max_length": 2048,
  "pad_token_id": 0,
  "transformers_version": "4.35.0"
}
BIN model.safetensors (Stored with Git LFS) Normal file
Binary file not shown.
30 special_tokens_map.json Normal file
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
93391 tokenizer.json Normal file
File diff suppressed because it is too large
BIN tokenizer.model (Stored with Git LFS) Normal file
Binary file not shown.
40 tokenizer_config.json Normal file
@@ -0,0 +1,40 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 2048,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}