# rag-agent-soo/app/rag_chain.py
# Last modified: 2025-04-21 05:04:23 +00:00 (130 lines, 4.3 KiB, Python)
# rag_chain.py
import os
import weaviate
from weaviate import Client
# from weaviate import WeaviateClient
# from weaviate.connect import ConnectionParams
from weaviate.auth import AuthApiKey
from weaviate.auth import AuthCredentials
from weaviate.classes.init import Auth
# from langchain.vectorstores import Weaviate
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate as WeaviateLangchain
from langchain.embeddings.openai import OpenAIEmbeddings
def build_rag_chain():
    """Build a RetrievalQA chain backed by a Weaviate vector store.

    Connects to the Weaviate instance, wraps the ``LangDocs`` class (with
    document text in the ``text`` property) in a LangChain vector store
    using OpenAI embeddings, and composes a ``RetrievalQA`` chain driven
    by a deterministic (temperature=0) ``ChatOpenAI`` LLM.

    Returns:
        RetrievalQA: a ready-to-query question-answering chain. The
        Weaviate client stays open for the lifetime of the chain — the
        retriever needs it on every query.
    """
    # SECURITY(review): these credentials were hardcoded in source and have
    # therefore leaked into version control — rotate them. The literals are
    # kept only as fallbacks for backward compatibility; prefer setting the
    # environment variables and then delete the defaults.
    openai_api_key = os.environ.get(
        "OPENAI_API_KEY",
        "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",
    )
    weaviate_api_key = os.environ.get("WEAVIATE_API_KEY", "01js3q6y7twaxccm5dbh3se9bt")
    weaviate_url = os.environ.get("WEAVIATE_URL", "http://183.111.96.67:32668")

    # 1. Weaviate client — the legacy v3 REST client, which is what the
    #    LangChain `Weaviate` vector-store wrapper expects. The OpenAI key is
    #    forwarded as a header so server-side vectorization modules can use it.
    client = weaviate.Client(
        url=weaviate_url,
        auth_client_secret=weaviate.AuthApiKey(api_key=weaviate_api_key),
        additional_headers={"X-OpenAI-Api-Key": openai_api_key},
    )

    # 2. Vector store over the "LangDocs" index.
    vectorstore = WeaviateLangchain(
        client=client,
        index_name="LangDocs",
        text_key="text",
        embedding=OpenAIEmbeddings(openai_api_key=openai_api_key),
    )

    # 3. LLM and retriever.
    llm = ChatOpenAI(temperature=0, openai_api_key=openai_api_key)
    retriever = vectorstore.as_retriever()

    # 4. Compose the RetrievalQA chain.
    # BUG FIX: the original called client.close() before returning. The v3
    # weaviate.Client has no close() method (AttributeError), and even a
    # successful close would sever the connection the returned retriever
    # still depends on — the client must remain open.
    return RetrievalQA.from_chain_type(llm=llm, retriever=retriever)