# rag_chain.py

import os

import weaviate
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate
from weaviate import Client, WeaviateClient
from weaviate.auth import AuthApiKey
from weaviate.classes.init import Auth
from weaviate.connect import ConnectionParams
def build_rag_chain():
    """Build a RetrievalQA chain backed by a Weaviate vector store.

    Connection details and credentials are read from environment variables
    so that no secrets are committed to source control:

      * ``OPENAI_API_KEY``     -- OpenAI key for embeddings + chat model
      * ``WEAVIATE_API_KEY``   -- Weaviate API key
      * ``WEAVIATE_HOST``      -- host or IP (default: 183.111.96.67)
      * ``WEAVIATE_HTTP_PORT`` -- HTTP port (default: 32668)
      * ``WEAVIATE_GRPC_PORT`` -- gRPC port (default: 32619)

    Returns:
        RetrievalQA: a question-answering chain over the "LangDocs" index.

    Raises:
        KeyError: if a required environment variable is missing.
        RuntimeError: if the Weaviate server does not report ready.
    """
    # SECURITY: the API keys that were previously hard-coded here were
    # exposed in version control and must be rotated; all secrets now come
    # from the environment.
    openai_api_key = os.environ["OPENAI_API_KEY"]
    weaviate_api_key = os.environ["WEAVIATE_API_KEY"]
    host = os.environ.get("WEAVIATE_HOST", "183.111.96.67")
    http_port = int(os.environ.get("WEAVIATE_HTTP_PORT", "32668"))
    grpc_port = int(os.environ.get("WEAVIATE_GRPC_PORT", "32619"))

    # 1. Weaviate v4 client -- a single connection path replaces the four
    # experimental clients (client/client2/client3/client4) the previous
    # version opened and mostly never closed. Note: in the v4 API, auth is
    # passed to the client, not to ConnectionParams.from_params().
    connection_params = ConnectionParams.from_params(
        http_host=host,
        http_port=http_port,
        http_secure=False,
        grpc_host=host,
        grpc_port=grpc_port,
        grpc_secure=False,
    )
    client = WeaviateClient(
        connection_params=connection_params,
        auth_client_secret=Auth.api_key(weaviate_api_key),
        additional_headers={"X-OpenAI-Api-Key": openai_api_key},
    )
    # Constructing WeaviateClient does not open the connection in v4; an
    # explicit connect() is required before is_ready().
    client.connect()
    try:
        if client.is_ready():
            print("Weaviate 연결 성공!")
        else:
            # Fail loudly instead of printing and continuing with a dead
            # connection as the old code did.
            raise RuntimeError("연결 실패. 서버 상태를 확인하세요.")
    finally:
        # The v4 client is only needed for the readiness probe; the chain
        # below uses the legacy client, so always release this one.
        client.close()

    # 2. Legacy (v3-style) client for langchain's Weaviate wrapper, which
    # predates the v4 client API. The old call passed connection_params to
    # weaviate.Client, which is not part of the v3 signature.
    # NOTE(review): weaviate.Client was removed in weaviate-client v4 --
    # confirm the installed package version still provides it, or migrate
    # to the langchain-weaviate integration.
    legacy_client = weaviate.Client(
        url=f"http://{host}:{http_port}",
        auth_client_secret=AuthApiKey(api_key=weaviate_api_key),
        additional_headers={"X-OpenAI-Api-Key": openai_api_key},
    )

    # 3. Vector store over the "LangDocs" index; document text lives under
    # the "text" property.
    vectorstore = Weaviate(
        client=legacy_client,
        index_name="LangDocs",
        text_key="text",
        embedding=OpenAIEmbeddings(openai_api_key=openai_api_key),
    )

    # 4. Deterministic chat model + retriever + RetrievalQA chain.
    llm = ChatOpenAI(temperature=0, openai_api_key=openai_api_key)
    retriever = vectorstore.as_retriever()
    qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever)

    # legacy_client is intentionally left open: the returned chain holds a
    # live reference to it.
    return qa_chain