Update rag_chain.py

localsoo 2025-04-18 02:03:33 +00:00
parent 2f2776c3ee
commit 3540898b3b

@@ -1,26 +1,38 @@
 # rag_chain.py
 import os
 import weaviate
 from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
+from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings  # either embedding class can be used below
 from langchain.vectorstores import Weaviate
 from langchain.chains import RetrievalQA
+from langchain.llms import HuggingFaceHub
 def build_rag_chain():
+    # 1. Weaviate client
     client = weaviate.Client(
-        url=os.getenv("WEAVIATE_URL"),
-        auth_client_secret=weaviate.AuthApiKey(os.getenv("WEAVIATE_API_KEY")),
-        additional_headers={"X-OpenAI-Api-Key": os.getenv("OPENAI_API_KEY")}
+        url=os.getenv("WEAVIATE_URL", "http://183.111.96.67:30846"),
+        auth_client_secret=weaviate.AuthApiKey(os.getenv("WEAVIATE_API_KEY", "01jryrcctd8c8vxbj4bs2ywrgs")),
+        additional_headers={"X-HuggingFace-Api-Key": os.getenv("OPENAI_API_KEY", "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")}  # HuggingFace token, still read from the OPENAI_API_KEY env var
     )
+    # 2. Vector store
     vectorstore = Weaviate(
         client=client,
         index_name="LangDocs",
         text_key="text",
-        embedding=OpenAIEmbeddings()
+        embedding=HuggingFaceEmbeddings()  # OpenAIEmbeddings() can be used instead
     )
+    # 3. HuggingFace LLM (e.g. mistralai/Mistral-7B-Instruct-v0.2)
+    llm = HuggingFaceHub(
+        repo_id="mistralai/Mistral-7B-Instruct-v0.2",
+        model_kwargs={
+            "temperature": 0.1,
+            "max_new_tokens": 512,
+            "top_p": 0.95,
+        }
+    )
     retriever = vectorstore.as_retriever()
-    llm = ChatOpenAI(temperature=0)
-    return RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
+    # 4. Build the RetrievalQA chain
+    return RetrievalQA.from_chain_type(llm=llm, retriever=retriever, chain_type="stuff")
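For reference, a minimal driver sketch for the new chain, assuming the pre-1.0 LangChain and weaviate-client v3 APIs imported above. The file name, query text, and placeholder values are illustrative and not part of this commit; note that LangChain's HuggingFaceHub wrapper also expects HUGGINGFACEHUB_API_TOKEN, which rag_chain.py does not pass explicitly.

# usage_example.py -- hypothetical driver, not part of this commit
import os
from rag_chain import build_rag_chain

# Placeholder values; the variable names mirror the os.getenv() calls in rag_chain.py.
os.environ.setdefault("WEAVIATE_URL", "http://localhost:8080")
os.environ.setdefault("WEAVIATE_API_KEY", "<weaviate-api-key>")
os.environ.setdefault("OPENAI_API_KEY", "<huggingface-api-token>")            # reused for the X-HuggingFace-Api-Key header
os.environ.setdefault("HUGGINGFACEHUB_API_TOKEN", "<huggingface-api-token>")  # read by the HuggingFaceHub LLM wrapper

qa = build_rag_chain()
print(qa.run("What is stored in the LangDocs index?"))  # RetrievalQA supports single-query .run()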
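The chain also assumes a LangDocs class with a text property already exists in Weaviate. A setup sketch using the weaviate-client v3 schema API is below; the class definition (vectorizer "none", single text property) is inferred from index_name and text_key in rag_chain.py and is an assumption, not something recorded in this commit.

# create_langdocs_schema.py -- hypothetical setup script (weaviate-client v3.x assumed)
import os
import weaviate

api_key = os.getenv("WEAVIATE_API_KEY")
client = weaviate.Client(
    url=os.getenv("WEAVIATE_URL", "http://localhost:8080"),
    auth_client_secret=weaviate.AuthApiKey(api_key) if api_key else None,
)

# Minimal class matching index_name="LangDocs" / text_key="text" in rag_chain.py.
# "vectorizer": "none" because embeddings are supplied client-side by LangChain.
langdocs_class = {
    "class": "LangDocs",
    "vectorizer": "none",
    "properties": [{"name": "text", "dataType": ["text"]}],
}

existing = {c["class"] for c in (client.schema.get().get("classes") or [])}
if "LangDocs" not in existing:
    client.schema.create_class(langdocs_class)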