# Build the prompt from the template.
formatted_prompt = prompt.format(adjective="funny", topic="programming")
print(formatted_prompt)
# Output: "Tell me a funny joke about programming."

# Use the formatted prompt with the LLM.
response = llm.invoke(formatted_prompt)
print(response.content)
2.2 ChatPromptTemplate
대화형 애플리케이션을 위한 템플릿입니다.
from langchain.prompts import ChatPromptTemplate

# System message + human message template.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant that translates {input_language} to {output_language}."),
    ("human", "{text}"),
])

# Fill in the message variables.
messages = prompt.format_messages(
    input_language="English",
    output_language="Korean",
    text="Hello, how are you?",
)

# Run the chain.
# NOTE(review): `chain` is not defined in this excerpt and its input keys do
# not match the prompt above -- presumably built in a section not shown here;
# confirm against the full document.
result = chain.invoke({"topic": "programming"})
print(result)
3.3 Sequential Chain
여러 체인을 순차적으로 실행합니다.
from langchain.chains import SimpleSequentialChain

# First chain: generate a blog topic.
chain_one = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        template="Give me a topic for a blog post about {subject}.",
        input_variables=["subject"],
    ),
)

# Second chain: outline a blog post for that topic.
chain_two = LLMChain(
    llm=llm,
    prompt=PromptTemplate(
        template="Write a blog post outline about this topic:\n{topic}",
        input_variables=["topic"],
    ),
)

# Wire the two chains together; each output feeds the next input.
overall_chain = SimpleSequentialChain(
    chains=[chain_one, chain_two],
    verbose=True,
)

# Run the pipeline end to end.
result = overall_chain.invoke("artificial intelligence")
print(result["output"])
3.4 Router Chain
조건에 따라 다른 체인을 실행합니다.
from langchain.chains.router import MultiPromptChain
from langchain.chains import ConversationChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.prompts import PromptTemplate

# Physics chain prompt.
physics_template = """You are a physics professor. Answer this question: {input}"""

# Math chain prompt.
# NOTE(review): math_template was referenced below but not defined in this
# excerpt; reconstructed by analogy with physics_template -- confirm against
# the original document.
math_template = """You are a math professor. Answer this question: {input}"""

# Prompt metadata used to build the router's destination chains.
prompt_infos = [
    {
        "name": "physics",
        "description": "Good for answering physics questions",
        "prompt_template": physics_template,
    },
    {
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": math_template,
    },
]

# Build one destination chain per prompt.
destination_chains = {}
for p_info in prompt_infos:
    name = p_info["name"]
    prompt = PromptTemplate(
        template=p_info["prompt_template"],
        input_variables=["input"],
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    destination_chains[name] = chain

# Fallback chain used when no destination matches.
default_chain = ConversationChain(llm=llm, output_key="text")

# Describe the destinations for the router prompt.
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = "\n".join(destinations)

router_template = f"""Given a raw text input to a language model, select the appropriate destination. << DESTINATIONS >> {destinations_str} << INPUT >> {{input}} << OUTPUT (must be a single word) >> """

# Route each question to the appropriate chain.
# NOTE(review): the LLMRouterChain / MultiPromptChain construction appears to
# be missing from this excerpt; `chain` here is presumably the MultiPromptChain.
print(chain.invoke("What is Newton's second law?"))
print(chain.invoke("What is the derivative of x^2?"))
4. Memory (메모리)
메모리는 대화의 컨텍스트를 유지하는 기능입니다.
4.1 ConversationBufferMemory
전체 대화 내용을 저장합니다.
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

# Initialise the memory (stores the full transcript).
memory = ConversationBufferMemory()

# Conversation chain backed by the memory.
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,
)

# Chat; earlier turns stay available to later ones.
print(conversation.predict(input="Hi, my name is John"))
print(conversation.predict(input="What's 1+1?"))
print(conversation.predict(input="What's my name?"))

# Inspect what the memory stored.
print(memory.load_memory_variables({}))
4.2 ConversationBufferWindowMemory
최근 N개의 대화만 저장합니다.
from langchain.memory import ConversationBufferWindowMemory

# Keep only the 2 most recent exchanges.
memory = ConversationBufferWindowMemory(k=2)

conversation.predict(input="Hi, my name is John")
conversation.predict(input="I'm 30 years old")
conversation.predict(input="I live in Seoul")
conversation.predict(input="What's my name?")  # Not remembered (k=2)
4.3 ConversationSummaryMemory
대화를 요약하여 저장합니다.
from langchain.memory import ConversationSummaryMemory

# Summary memory: condenses the transcript with the LLM instead of
# storing it verbatim.
memory = ConversationSummaryMemory(llm=llm)

conversation.predict(input="Hi, I'm planning a trip to Paris. I want to visit the Eiffel Tower and Louvre Museum.")
conversation.predict(input="What did I say about my trip?")

# Inspect the summarised contents.
print(memory.load_memory_variables({}))
4.4 ConversationEntityMemory
엔티티(사람, 장소 등)별로 정보를 저장합니다.
from langchain.memory import ConversationEntityMemory

conversation.predict(input="John is 30 years old and lives in Seoul")
conversation.predict(input="Sarah is 25 years old and lives in Busan")
conversation.predict(input="How old is John?")

# Inspect the per-entity store (people, places, ...).
print(memory.entity_store.store)
5. 출력 파서(Output Parsers)
LLM의 출력을 구조화된 형식으로 변환합니다.
5.1 기본 문자열 파서
from langchain_core.output_parsers import StrOutputParser
from langchain.output_parsers import ResponseSchema, StructuredOutputParser

# Define the response schema for structured output.
response_schemas = [
    ResponseSchema(name="name", description="The name of the person"),
    ResponseSchema(name="age", description="The age of the person"),
    ResponseSchema(name="city", description="The city where the person lives"),
]
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field


# Data model describing the structure the LLM must produce.
# (Fixed fused token: `classPerson` -> `class Person`, a SyntaxError as written.)
class Person(BaseModel):
    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")
    city: str = Field(description="The city where the person lives")
    hobbies: list[str] = Field(description="The person's hobbies")


# Build the parser from the model.
parser = PydanticOutputParser(pydantic_object=Person)

# Run the chain with format instructions injected into the prompt.
result = chain.invoke({
    "text": "John is 30 years old, lives in Seoul, and enjoys hiking and reading",
    "format_instructions": parser.get_format_instructions(),
})
def calculator(a: float, b: float, operation: str) -> float:
    """Perform basic arithmetic operations.

    Args:
        a: First operand.
        b: Second operand.
        operation: One of "add", "subtract", "multiply", "divide".

    Returns:
        The numeric result; for "divide" with b == 0 the sentinel string
        "Cannot divide by zero" is returned instead of raising (tutorial
        convention). An unrecognised operation falls through and returns
        None implicitly.
    """
    # Fixed fused tokens from the original: `defcalculator` and
    # `0else"Cannot` were SyntaxErrors as written.
    if operation == "add":
        return a + b
    elif operation == "subtract":
        return a - b
    elif operation == "multiply":
        return a * b
    elif operation == "divide":
        # Guard against division by zero with a sentinel string.
        return a / b if b != 0 else "Cannot divide by zero"
# Alternative: a FAISS vector store built from the same documents.
# NOTE(review): `FAISS`, `splits`, `embeddings`, and `vectorstore` are
# defined in a section not shown in this excerpt.
faiss_store = FAISS.from_documents(splits, embeddings)

# Similarity search: retrieve the 3 closest chunks to the query.
query = "What is LangChain?"
docs = vectorstore.similarity_search(query, k=3)

for doc in docs:
    print(doc.page_content)
    print("---")
7.4 RAG 체인 구성
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

# Prompt template for the RAG chain.
template = """Use the following context to answer the question. If you don't know the answer, say you don't know. Context: {context} Question: {question} Answer:"""

# Every run is recorded in LangSmith.
# NOTE(review): this `chain` belongs to the LangSmith section; its
# construction is not shown in this excerpt.
result = chain.invoke({"topic": "AI"})
10.2 Verbose 모드
# Print the chain's execution steps as they run.
chain = LLMChain(llm=llm, prompt=prompt, verbose=True)
result = chain.invoke({"question": "What is AI?"})
10.3 디버깅
# Fixed fused token: `globalsimport` -> `globals import` (SyntaxError as written).
from langchain.globals import set_debug

# Enable global debug mode: all chain internals are printed.
set_debug(True)

result = chain.invoke({"topic": "AI"})
11. 프로덕션 베스트 프랙티스
11.1 에러 핸들링
from langchain.schema import OutputParserException

try:
    result = chain.invoke({"input": "test"})
except OutputParserException as e:
    # Parsing failed: retry or apply fallback logic here.
    print(f"Parsing error: {e}")
except Exception as e:
    # Top-level boundary: report anything else.
    print(f"General error: {e}")