Update 1/model.py

commit a3018cd5f5
parent b9941ada86
Author: cheetahadmin
Date:   2026-03-16 08:19:55 +00:00

@@ -22,6 +22,7 @@ import torch
 import numpy as np
 import triton_python_backend_utils as pb_utils
 import uuid
+import transformers
 from typing import List, Dict, Any, Union, Tuple
 from transformers import (
@@ -35,76 +36,80 @@ from peft import PeftModel, PeftConfig
 class TritonPythonModel:
     def initialize(self, args: Dict[str, str]):
         """
-        Model initialization: load the config, set up the logger, and load the model/tokenizer.
+        Model initialization: check library versions and load the model/tokenizer.
         """
         self.logger = pb_utils.Logger
         self.model_config = json.loads(args["model_config"])
         self.model_name = args["model_name"]
 
+        # 1. Log library versions.
+        # transformers >= 4.40.0 is recommended for loading GGUF files.
+        transformers_version = transformers.__version__
+        self.logger.log_info(f"================ {self.model_name} Setup ================")
+        self.logger.log_info(f"Transformers Version: {transformers_version}")
+        self.logger.log_info(f"Torch Version: {torch.__version__}")
 
         # Load configuration parameters
         self.base_model_path = self._get_config_param("base_model_path")
-        self.is_adapter_model = self._get_config_param("is_adapter_model", "false").lower() == "true"
-        self.adapter_model_path = self._get_config_param("adapter_model_path")
-        self.quantization = self._get_config_param("quantization")
+        self.gguf_filename = self._get_config_param("gguf_filename")
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
-        # Log the configuration
-        self.logger.log_info(f"================ {self.model_name} Setup ================")
-        self.logger.log_info(f"Base Model: {self.base_model_path}")
-        self.logger.log_info(f"Adapter Mode: {self.is_adapter_model} ({self.adapter_model_path})")
-        self.logger.log_info(f"Quantization: {self.quantization}")
+        self.logger.log_info(f"Base Model Path: {self.base_model_path}")
+        self.logger.log_info(f"GGUF Filename: {self.gguf_filename}")
         self.logger.log_info(f"Device: {self.device}")
 
+        # 2. Load the model and tokenizer.
         self._load_model_and_tokenizer()
         self.logger.log_info(f"Model initialized successfully.")
     def _load_model_and_tokenizer(self):
-        """Loads and configures the model and tokenizer."""
-        # 1. Quantization config
-        bnb_config = self._get_bnb_config()
+        """
+        Loads the GGUF model using the parameters from config.pbtxt.
+        The Transformers library reads the GGUF file and dequantizes it to fp16.
+        """
+        # 1. Read settings from config.pbtxt
+        load_path = self.base_model_path  # /cheetah/input/model/groupuser/Qwen3-4B-Instruct-2507-mahjong-alpha
+        gguf_file = self._get_config_param("gguf_filename")  # Qwen3-4B-Instruct-2507-mahjong-alpha.gguf
 
-        # 2. Load the base model
-        load_path = self.base_model_path
-        if self.is_adapter_model:
-            peft_config = PeftConfig.from_pretrained(self.adapter_model_path)
-            load_path = peft_config.base_model_name_or_path
+        self.logger.log_info(f"Loading GGUF from: {load_path}/{gguf_file}")
 
         try:
+            # 2. Load the tokenizer (uses the tokenizer metadata inside the GGUF file)
+            self.tokenizer = AutoTokenizer.from_pretrained(
+                load_path,
+                gguf_file=gguf_file,
+                trust_remote_code=True
+            )
+
+            # 3. Load the model (GGUF -> PyTorch fp16 conversion)
+            # Note: GGUF loading may not be combinable with bnb_config (int4/int8).
             self.model = AutoModelForCausalLM.from_pretrained(
                 load_path,
-                torch_dtype="auto",
-                quantization_config=bnb_config,
+                gguf_file=gguf_file,
+                torch_dtype=torch.float16,
                 device_map="auto",
                 local_files_only=True,
                 trust_remote_code=True
             )
+            self.model.eval()
+
+            # Set the padding token
+            if self.tokenizer.pad_token is None:
+                self.tokenizer.pad_token = self.tokenizer.eos_token
+                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
+
+            self.supports_chat_template = (
+                hasattr(self.tokenizer, "chat_template") and
+                self.tokenizer.chat_template is not None
+            )
+            self.logger.log_info("GGUF Model and Tokenizer loaded successfully via Transformers.")
         except Exception as e:
-            self.logger.log_error(f"Failed to load base model: {e}")
+            self.logger.log_error(f"Failed to load GGUF model: {e}")
             raise e
 
-        # 3. Merge the adapter (if needed)
-        if self.is_adapter_model:
-            self.model = PeftModel.from_pretrained(self.model, self.adapter_model_path)
-        self.model.eval()
-
-        # 4. Load the tokenizer
-        self.tokenizer = AutoTokenizer.from_pretrained(load_path, trust_remote_code=True)
-        if self.tokenizer.pad_token is None:
-            self.tokenizer.pad_token = self.tokenizer.eos_token
-            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
-            self.logger.log_info("Pad token was None. Set to EOS token.")
-
-        self.supports_chat_template = (
-            hasattr(self.tokenizer, "chat_template") and
-            self.tokenizer.chat_template is not None
-        )
-        self.logger.log_info(f"Supports Chat Template: {self.supports_chat_template}")
-        if self.supports_chat_template:
-            self.logger.log_info(f"Chat Template Content:\n{self.tokenizer.chat_template}")
 
     def _get_bnb_config(self) -> Union[BitsAndBytesConfig, None]:
         if self.quantization == "int4":
             return BitsAndBytesConfig(
@@ -252,4 +257,4 @@ class TritonPythonModel:
         self.logger.log_info(f"Finalizing model {self.model_name}")
         self.model = None
         self.tokenizer = None
         torch.cuda.empty_cache()
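
The new comment recommends transformers >= 4.40.0 for GGUF loading, but the committed code only logs the version. If the recommendation should be a hard requirement, a minimal guard could fail fast during initialize(); this is a sketch, not part of the commit, and the `packaging` dependency and exact version floor are assumptions:

from packaging import version
import transformers

MIN_TRANSFORMERS = "4.40.0"  # assumed floor, taken from the comment in this commit

# Fail fast at startup instead of erroring later inside from_pretrained().
if version.parse(transformers.__version__) < version.parse(MIN_TRANSFORMERS):
    raise RuntimeError(
        f"transformers {transformers.__version__} is too old for GGUF loading; "
        f"need >= {MIN_TRANSFORMERS}"
    )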
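
The diff reads its settings through self._get_config_param(), but that helper sits outside the changed hunks. For context, Triton parses the `parameters` block of config.pbtxt into the model-config JSON passed to initialize(), so a typical helper looks like the sketch below; this is an assumption about the helper's shape, not the actual code:

def _get_config_param(self, key: str, default: str = "") -> str:
    # Triton exposes config.pbtxt `parameters` entries in the parsed model
    # config as {"<key>": {"string_value": "<value>"}}.
    params = self.model_config.get("parameters", {})
    return params.get(key, {}).get("string_value", default)

Under that assumption, config.pbtxt would carry entries such as parameters: { key: "gguf_filename" value: { string_value: "Qwen3-4B-Instruct-2507-mahjong-alpha.gguf" } }, matching the filename shown in the commit's inline comments.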
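
Because Transformers dequantizes the GGUF to fp16 at load time, the load path can be smoke-tested outside Triton before deploying. A sketch reusing the paths from the commit's comments (the prompt content is purely illustrative, and apply_chat_template assumes the GGUF metadata carries a chat template, which is what supports_chat_template probes for):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

load_path = "/cheetah/input/model/groupuser/Qwen3-4B-Instruct-2507-mahjong-alpha"
gguf_file = "Qwen3-4B-Instruct-2507-mahjong-alpha.gguf"

# Same load arguments as the Triton backend above.
tokenizer = AutoTokenizer.from_pretrained(load_path, gguf_file=gguf_file, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    load_path,
    gguf_file=gguf_file,
    torch_dtype=torch.float16,
    device_map="auto",
    local_files_only=True,
    trust_remote_code=True,
)
model.eval()

# Exercise the chat template and a short generation end to end.
messages = [{"role": "user", "content": "Hello"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))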