# Qwen3-4B-Instruct-2507-mahj.../1/model.py

import json
import uuid
from typing import Dict, List, Tuple, Union

import numpy as np
import torch
import transformers
import triton_python_backend_utils as pb_utils
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    GenerationConfig,
)


class TritonPythonModel:
    def initialize(self, args: Dict[str, str]):
        """
        Model initialization: log library versions and load the model/tokenizer.
        """
        self.logger = pb_utils.Logger
        self.model_config = json.loads(args["model_config"])
        self.model_name = args["model_name"]

        # 1. Log library versions.
        # transformers >= 4.40.0 is recommended for GGUF loading.
        transformers_version = transformers.__version__
        self.logger.log_info(f"================ {self.model_name} Setup ================")
        self.logger.log_info(f"Transformers Version: {transformers_version}")
        self.logger.log_info(f"Torch Version: {torch.__version__}")

        # Load configuration parameters from config.pbtxt.
        self.base_model_path = self._get_config_param("base_model_path")
        self.gguf_filename = self._get_config_param("gguf_filename")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.logger.log_info(f"Base Model Path: {self.base_model_path}")
        self.logger.log_info(f"GGUF Filename: {self.gguf_filename}")
        self.logger.log_info(f"Device: {self.device}")

        # 2. Load the model and tokenizer.
        self._load_model_and_tokenizer()
        self.logger.log_info("Model initialized successfully.")

    def _load_model_and_tokenizer(self):
        """
        Load the GGUF model using parameters from config.pbtxt.
        The Transformers library reads the GGUF file and dequantizes it to fp16.
        """
        # 1. Read settings from config.pbtxt.
        load_path = self.base_model_path  # /cheetah/input/model/groupuser/Qwen3-4B-Instruct-2507-mahjong-alpha
        gguf_file = self.gguf_filename  # Qwen3-4B-Instruct-2507-mahjong-alpha.gguf
        self.logger.log_info(f"Loading GGUF from: {load_path}/{gguf_file}")

        try:
            # 2. Load the tokenizer (reads the tokenizer metadata embedded in the GGUF file).
            self.tokenizer = AutoTokenizer.from_pretrained(
                load_path,
                gguf_file=gguf_file,
                trust_remote_code=True
            )
            # 3. Load the model (GGUF -> PyTorch fp16).
            # Note: combining GGUF loading with a bnb_config (int4/int8) may not be supported.
            self.model = AutoModelForCausalLM.from_pretrained(
                load_path,
                gguf_file=gguf_file,
                torch_dtype=torch.float16,
                device_map="auto",
                local_files_only=True,
                trust_remote_code=True
            )
            self.model.eval()

            # Configure the padding token; fall back to EOS when none is defined.
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

            self.supports_chat_template = (
                hasattr(self.tokenizer, "chat_template") and
                self.tokenizer.chat_template is not None
            )
            self.logger.log_info("GGUF Model and Tokenizer loaded successfully via Transformers.")
        except Exception as e:
            self.logger.log_error(f"Failed to load GGUF model: {e}")
            raise
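
    # The loader above expects two entries in the model's config.pbtxt
    # "parameters" section; a minimal sketch, with the paths taken from the
    # comments above (adjust for the actual deployment):
    #
    #   parameters: {
    #     key: "base_model_path"
    #     value: { string_value: "/cheetah/input/model/groupuser/Qwen3-4B-Instruct-2507-mahjong-alpha" }
    #   }
    #   parameters: {
    #     key: "gguf_filename"
    #     value: { string_value: "Qwen3-4B-Instruct-2507-mahjong-alpha.gguf" }
    #   }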

    def _get_bnb_config(self) -> Union[BitsAndBytesConfig, None]:
        """Optional bitsandbytes quantization config; unused on the GGUF path above."""
        # The "quantization" config.pbtxt key name is an assumption; nothing in
        # initialize() sets a quantization attribute.
        quantization = self._get_config_param("quantization", "none")
        if quantization == "int4":
            return BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16
            )
        elif quantization == "int8":
            return BitsAndBytesConfig(
                load_in_8bit=True,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=True
            )
        return None
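
    # If the bitsandbytes path were wired up, the mode could come from the same
    # config.pbtxt "parameters" section; the key name is an assumption, not
    # something the GGUF loader reads:
    #
    #   parameters: { key: "quantization" value: { string_value: "int4" } }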

    def execute(self, requests):
        """Main loop for handling Triton inference requests."""
        responses = []
        for request in requests:
            # [ID generation] kept for log tracing only (not included in the response).
            request_id = request.request_id()
            if not request_id:
                request_id = str(uuid.uuid4())
            try:
                # 1. Parse the input data.
                input_data, is_chat = self._parse_input(request)

                # [LOGGING] Log the input together with the request ID.
                log_input_str = json.dumps(input_data, ensure_ascii=False) if isinstance(input_data, (list, dict)) else str(input_data)
                self.logger.log_info(f"\n[RID: {request_id}] >>> [{'CHAT' if is_chat else 'TEXT'}][Input]: {log_input_str}")

                # 2. Build the generation config.
                gen_config = self._create_generation_config(request)

                # 3. Tokenize.
                inputs = self._tokenize(input_data, is_chat)

                # 4. Run model inference (generate).
                output_text = self._generate(inputs, gen_config)

                # [LOGGING] Log the output together with the request ID.
                self.logger.log_info(f"\n[RID: {request_id}] <<< [Output]: {output_text}")

                # 5. Build the response.
                responses.append(self._create_response(output_text, request_id))
            except Exception as e:
                self.logger.log_error(f"[RID: {request_id}] Error during execution: {e}")
                err_tensor = pb_utils.Tensor("text_output", np.array([str(e).encode('utf-8')], dtype=np.bytes_))
                responses.append(pb_utils.InferenceResponse(output_tensors=[err_tensor]))
        return responses

    def _parse_input(self, request) -> Tuple[Union[str, List[Dict]], bool]:
        """Return (conversation, True) for a JSON chat list, else (raw text, False)."""
        input_text = self._get_input_scalar(request, "text_input")
        try:
            conversation = json.loads(input_text)
            if isinstance(conversation, list):
                return conversation, True
        except (json.JSONDecodeError, TypeError):
            pass
        return input_text, False
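
    # The "text_input" tensor accepts two payload shapes; a JSON array parses
    # as a chat conversation, anything else falls back to plain-text
    # completion (example contents are illustrative):
    #
    #   plain text:  "Explain what a yaku is."
    #   chat JSON:   '[{"role": "user", "content": "Explain what a yaku is."}]'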

    def _tokenize(self, input_data, is_chat: bool):
        """Tokenize either a chat conversation (via the chat template) or plain text."""
        if self.supports_chat_template and is_chat:
            return self.tokenizer.apply_chat_template(
                input_data,
                tokenize=True,
                add_generation_prompt=True,
                return_tensors="pt",
                return_dict=True
            ).to(self.device)
        else:
            # Without a chat template, fall back to plain-text tokenization.
            if is_chat:
                input_data = str(input_data)
            return self.tokenizer(input_data, return_tensors="pt").to(self.device)

    def _generate(self, inputs, gen_config: GenerationConfig) -> str:
        """Run generation and decode only the newly generated tokens."""
        input_ids = inputs["input_ids"]
        input_len = input_ids.shape[-1]
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                generation_config=gen_config,
                pad_token_id=self.tokenizer.pad_token_id,
                eos_token_id=self.tokenizer.eos_token_id
            )
        # Strip the prompt tokens so only the completion is decoded.
        generated_tokens = outputs[0][input_len:]
        decoded_output = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)
        return decoded_output.strip()

    def _create_generation_config(self, request) -> GenerationConfig:
        """Build a GenerationConfig from optional per-request input tensors."""
        def get_param(name, default=None, cast_type=None):
            val = self._get_input_scalar(request, name, default)
            if val is not None and cast_type:
                return cast_type(val)
            return val

        # max_new_tokens takes precedence over max_length when both are set.
        return GenerationConfig(
            max_length=get_param("max_length", 1024, int),
            max_new_tokens=get_param("max_new_tokens", 256, int),
            temperature=get_param("temperature", 1.0, float),
            do_sample=get_param("do_sample", False, bool),
            top_k=get_param("top_k", 50, int),
            top_p=get_param("top_p", 1.0, float),
            repetition_penalty=get_param("repetition_penalty", 1.0, float),
        )
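
    # Each generation parameter above can be attached by the client as an
    # optional scalar input tensor of the same name; a sketch, assuming the
    # usual Triton dtypes (TYPE_FP32 / TYPE_INT32 / TYPE_BOOL declared with
    # optional: true in config.pbtxt):
    #
    #   temperature    = np.array([0.7], dtype=np.float32)
    #   do_sample      = np.array([True], dtype=np.bool_)
    #   max_new_tokens = np.array([512], dtype=np.int32)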

    def _create_response(self, output_text: str, request_id: str):
        """Convert the generated text into a Triton response object."""
        output_tensor = pb_utils.Tensor(
            "text_output",
            np.array([output_text.encode('utf-8')], dtype=np.bytes_)
        )
        return pb_utils.InferenceResponse(output_tensors=[output_tensor])

    def _get_config_param(self, key: str, default: str = None) -> str:
        params = self.model_config.get('parameters', {})
        if key in params:
            return params[key].get('string_value', default)
        return default

    def _get_input_scalar(self, request, name: str, default=None):
        tensor = pb_utils.get_input_tensor_by_name(request, name)
        if tensor is None:
            return default
        return self._np_decoder(tensor.as_numpy()[0])

    def _np_decoder(self, obj):
        """Convert a numpy scalar (or bytes) into a native Python value."""
        if isinstance(obj, bytes):
            return obj.decode('utf-8')
        if isinstance(obj, np.bool_):
            return bool(obj)
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return round(float(obj), 3)
        return obj

    def finalize(self):
        self.logger.log_info(f"Finalizing model {self.model_name}")
        self.model = None
        self.tokenizer = None
        torch.cuda.empty_cache()
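
# A minimal client sketch, assuming the model is served over HTTP on
# localhost:8000 and that tritonclient is installed; "<model_name>" stands in
# for the deployed repository name:
#
#   import numpy as np
#   import tritonclient.http as httpclient
#
#   client = httpclient.InferenceServerClient(url="localhost:8000")
#   text = '[{"role": "user", "content": "What is a yaku?"}]'
#   text_input = httpclient.InferInput("text_input", [1], "BYTES")
#   text_input.set_data_from_numpy(np.array([text.encode("utf-8")], dtype=np.object_))
#   result = client.infer("<model_name>", inputs=[text_input])
#   print(result.as_numpy("text_output")[0].decode("utf-8"))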