LangChain과 LangGraph로 시작하는 AI 에이전트 개발

LangChain과 LangGraph 개요

LangChain과 LangGraph는 대규모 언어 모델(LLM)을 활용한 애플리케이션 개발을 위한 강력한 프레임워크입니다. LangChain은 LLM 기반 애플리케이션 개발을 위한 기본 구성 요소를 제공하며, LangGraph는 복잡한 AI 에이전트와 워크플로우를 그래프 구조로 설계할 수 있게 해줍니다.

LangChain이란?

LangChain은 LLM을 활용한 애플리케이션을 쉽게 구축할 수 있도록 돕는 오픈소스 프레임워크입니다.

LangChain의 주요 특징

  • 체인(Chain) 기반 구조: 여러 컴포넌트를 연결하여 복잡한 작업 수행
  • 다양한 LLM 지원: OpenAI, Anthropic, HuggingFace 등 다양한 모델 지원
  • Memory 관리: 대화 히스토리 및 컨텍스트 유지
  • 도구 통합: 외부 API, 데이터베이스, 검색 엔진 등과 연동
  • 프롬프트 템플릿: 재사용 가능한 프롬프트 관리

LangGraph란?

LangGraph는 LangChain을 기반으로 상태를 가진(stateful) 멀티 액터 애플리케이션을 그래프로 구축할 수 있는 라이브러리입니다.

LangGraph의 주요 특징

  • 그래프 기반 워크플로우: 복잡한 AI 워크플로우를 노드와 엣지로 표현
  • 상태 관리: 애플리케이션 전체의 상태를 효과적으로 관리
  • 순환 구조 지원: 반복적인 작업 및 피드백 루프 구현 가능
  • 조건부 라우팅: 동적으로 실행 경로 결정
  • Human-in-the-loop: 사람의 개입이 필요한 지점 설정 가능

1. 설치 및 환경 설정

필수 패키지 설치

# Install LangChain core + the OpenAI integration
pip install langchain langchain-openai

# When using Gemini models instead
pip install langchain langchain-google-genai

# Install LangGraph
pip install langgraph

# Extra utilities (.env file loading)
pip install python-dotenv

환경 변수 설정

# Create the .env file (shell)
echo "OPENAI_API_KEY=your-api-key-here" > .env
# config.py — loads environment variables at import time
import os
from dotenv import load_dotenv

# Read variables from .env into the process environment.
load_dotenv()

# API key used by the OpenAI-backed LLM clients below.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

기본 설정 확인

from langchain_openai import ChatOpenAI

# Initialize the chat model (temperature 0.7 -> moderately creative output).
llm = ChatOpenAI(
    model="gpt-4",
    temperature=0.7,
)

# Quick smoke test: a single synchronous call.
response = llm.invoke("Hello, LangChain!")
print(response.content)

2. LangChain 기본 사용법

2.1 프롬프트 템플릿

from langchain.prompts import ChatPromptTemplate

# Build a chat prompt template with system + human roles.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant that translates {input_language} to {output_language}."),
    ("human", "{text}"),
])

# Fill in the template variables to produce concrete messages.
messages = prompt.format_messages(
    input_language="English",
    output_language="Korean",
    text="Hello, how are you?",
)

# Call the LLM with the formatted messages.
response = llm.invoke(messages)
print(response.content)

2.2 체인(Chain) 구성

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Prompt template.
# NOTE(review): LLMChain is deprecated in recent LangChain releases; the LCEL
# form `prompt | llm` (section 2.3) is the modern equivalent.
template = """
Given the following question, provide a detailed answer:

Question: {question}

Answer:
"""

prompt = PromptTemplate(
    template=template,
    input_variables=["question"],
)

# Create the chain.
chain = LLMChain(llm=llm, prompt=prompt)

# Run the chain; the answer is returned under the "text" key.
result = chain.invoke({"question": "What is LangChain?"})
print(result["text"])

2.3 LCEL (LangChain Expression Language)

LCEL은 체인을 더 직관적으로 구성할 수 있는 선언적 방법입니다.

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Build a chain with LCEL (LangChain Expression Language).
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
output_parser = StrOutputParser()

# The pipe operator composes prompt -> LLM -> output parser.
chain = prompt | llm | output_parser

# Execute the chain.
result = chain.invoke({"topic": "programming"})
print(result)

2.4 Memory 활용

from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

# Initialize conversation memory (stores the full dialogue history).
memory = ConversationBufferMemory()

# Create a conversation chain that injects the history into every call.
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True,
)

# Start the conversation.
response1 = conversation.predict(input="Hi, my name is John")
print(response1)

# The model can now recall earlier turns.
response2 = conversation.predict(input="What's my name?")
print(response2)

# Inspect the stored conversation history.
print(memory.load_memory_variables({}))

2.5 도구(Tools) 사용

from langchain.agents import load_tools, initialize_agent, AgentType
from langchain_community.tools import DuckDuckGoSearchRun

# Initialize the web-search tool.
search = DuckDuckGoSearchRun()

# Tools available to the agent.
tools = [search]

# Initialize a ReAct-style agent over the tools.
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)

# Run the agent.
# NOTE(review): `run` is deprecated in newer LangChain; `invoke` is the modern API.
result = agent.run("What's the latest news about LangChain?")
print(result)

3. LangGraph 기본 사용법

3.1 간단한 그래프 생성

from langgraph.graph import Graph, StateGraph
from typing import TypedDict

# Shared graph state passed between nodes.
class State(TypedDict):
    messages: list  # log of processing messages, appended by each node
    count: int      # number of nodes executed so far


def node_1(state: State) -> State:
    """First node: increment the counter and record its visit."""
    print("Node 1 executed")
    state["count"] += 1
    state["messages"].append("Processed by node 1")
    return state


def node_2(state: State) -> State:
    """Second node: increment the counter and record its visit."""
    print("Node 2 executed")
    state["count"] += 1
    state["messages"].append("Processed by node 2")
    return state

# Build the graph over the shared State.
workflow = StateGraph(State)

# Register nodes.
workflow.add_node("node1", node_1)
workflow.add_node("node2", node_2)

# Linear edge: node1 -> node2.
workflow.add_edge("node1", "node2")

# Entry node.
workflow.set_entry_point("node1")

# Finish node.
workflow.set_finish_point("node2")

# Compile into a runnable app.
app = workflow.compile()

# Execute with an initial state.
initial_state = {
    "messages": [],
    "count": 0,
}

result = app.invoke(initial_state)
print(result)
print(result)

3.2 조건부 라우팅

from langgraph.graph import END

# State for the routing example.
class RouterState(TypedDict):
    input: str   # raw user input
    route: str   # chosen branch name
    output: str  # final response text


def router(state: RouterState) -> RouterState:
    """Classify the input: greetings take the 'greeting' branch, all else 'general'."""
    if "hello" in state["input"].lower():
        state["route"] = "greeting"
    else:
        state["route"] = "general"
    return state


def greeting_node(state: RouterState) -> RouterState:
    """Handle greeting inputs."""
    state["output"] = "Hello! How can I help you?"
    return state


def general_node(state: RouterState) -> RouterState:
    """Handle all non-greeting inputs."""
    state["output"] = "I'll help you with that."
    return state


def route_decision(state: RouterState) -> str:
    """Map the routed state to the name of the next node."""
    if state["route"] == "greeting":
        return "greeting"
    return "general"

# Assemble the routing graph.
workflow = StateGraph(RouterState)

workflow.add_node("router", router)
workflow.add_node("greeting", greeting_node)
workflow.add_node("general", general_node)

workflow.set_entry_point("router")

# Conditional edges: route_decision picks the next node by name.
workflow.add_conditional_edges(
    "router",
    route_decision,
    {
        "greeting": "greeting",
        "general": "general",
    },
)

workflow.add_edge("greeting", END)
workflow.add_edge("general", END)

app = workflow.compile()

# Smoke-test both branches.
result1 = app.invoke({"input": "hello there", "route": "", "output": ""})
print(result1["output"])  # "Hello! How can I help you?"

result2 = app.invoke({"input": "what's the weather", "route": "", "output": ""})
print(result2["output"])  # "I'll help you with that."

3.3 AI 에이전트 구현

from langchain_openai import ChatOpenAI
from langchain.tools import tool
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator

# Tool definitions.
@tool
def calculator(expression: str) -> str:
    """Calculate a mathematical expression."""
    # SECURITY: eval() executes arbitrary Python — never expose this to
    # untrusted input; use ast.literal_eval or a real math parser in production.
    try:
        result = eval(expression)
        return f"The result is {result}"
    except Exception as e:
        return f"Error: {str(e)}"


@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    # Stub: a real implementation would call a weather API.
    return f"The weather in {city} is sunny, 22°C"


# Agent state: `operator.add` makes `messages` accumulate across node updates.
class AgentState(TypedDict):
    messages: Annotated[list, operator.add]
    next_action: str


# Deterministic LLM for tool use.
llm = ChatOpenAI(model="gpt-4", temperature=0)

# Bind the tools so the model can emit tool calls.
tools = [calculator, get_weather]
llm_with_tools = llm.bind_tools(tools)

# Agent node: let the LLM (with tools bound) respond to the conversation so far.
def agent(state: AgentState):
    messages = state["messages"]
    response = llm_with_tools.invoke(messages)
    return {"messages": [response]}


# Tool-execution node: run the first tool call requested by the last message.
def execute_tools(state: AgentState):
    messages = state["messages"]
    last_message = messages[-1]

    # Did the model request a tool call?
    if hasattr(last_message, "tool_calls") and last_message.tool_calls:
        tool_call = last_message.tool_calls[0]

        # Find the matching tool and execute it.
        for tool in tools:
            if tool.name == tool_call["name"]:
                result = tool.invoke(tool_call["args"])
                return {"messages": [result]}

    return {"messages": []}


# Router: keep looping through the tools node while the model requests tools.
def should_continue(state: AgentState) -> str:
    messages = state["messages"]
    last_message = messages[-1]

    if hasattr(last_message, "tool_calls") and last_message.tool_calls:
        return "tools"
    return END

# Assemble the agent graph: agent <-> tools loop until no more tool calls.
workflow = StateGraph(AgentState)

workflow.add_node("agent", agent)
workflow.add_node("tools", execute_tools)

workflow.set_entry_point("agent")

workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "tools": "tools",
        END: END,
    },
)

# After tools run, hand control back to the agent.
workflow.add_edge("tools", "agent")

agent_app = workflow.compile()

# Run the agent.
result = agent_app.invoke({
    "messages": [("user", "What is 25 * 4?")]
})

for msg in result["messages"]:
    print(msg)

4. 실전 예제: RAG 시스템 구축

4.1 문서 로딩 및 벡터 저장소 구성

from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

# Load the source documents.
loader = TextLoader("documents/sample.txt")
documents = loader.load()

# Split into overlapping chunks so retrieval keeps local context.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
)
splits = text_splitter.split_documents(documents)

# Embedding model.
embeddings = OpenAIEmbeddings()

# Build (and persist) the vector store.
vectorstore = Chroma.from_documents(
    documents=splits,
    embedding=embeddings,
    persist_directory="./chroma_db",
)

4.2 RAG 체인 구성

from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

# Prompt template for grounded question answering.
template = """
Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}

Question: {question}

Answer:
"""

QA_PROMPT = PromptTemplate(
    template=template,
    input_variables=["context", "question"],
)

# Build the RAG chain: retrieve the top-3 chunks, "stuff" them into one prompt.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
    chain_type_kwargs={"prompt": QA_PROMPT},
    return_source_documents=True,
)

# Ask a question.
result = qa_chain.invoke({"query": "What is the main topic of the document?"})

print("Answer:", result["result"])
print("\nSource Documents:")
for doc in result["source_documents"]:
    print(f"- {doc.page_content[:100]}...")

4.3 LangGraph를 활용한 고급 RAG

from langgraph.graph import StateGraph, END
from typing import TypedDict

class RAGState(TypedDict):
    question: str           # the user question
    documents: list         # retrieved document chunks
    answer: str             # generated answer
    needs_refinement: bool  # set by the quality-check node


# Retrieval node.
def retrieve(state: RAGState) -> RAGState:
    """Fetch the top-3 most similar chunks for the question."""
    question = state["question"]
    docs = vectorstore.similarity_search(question, k=3)
    state["documents"] = docs
    return state


# Answer-generation node.
def generate(state: RAGState) -> RAGState:
    """Generate an answer grounded in the retrieved documents."""
    question = state["question"]
    docs = state["documents"]

    context = "\n\n".join([doc.page_content for doc in docs])

    prompt = f"""
Based on the following context, answer the question.

Context: {context}

Question: {question}

Answer:
"""

    response = llm.invoke(prompt)
    state["answer"] = response.content
    state["needs_refinement"] = False
    return state

# Quality-check node.
def check_quality(state: RAGState) -> RAGState:
    """Ask the LLM to grade the answer; flag it for refinement on a 'no' verdict."""
    answer = state["answer"]
    question = state["question"]

    # Evaluate answer quality.
    eval_prompt = f"""
Evaluate if this answer adequately addresses the question.

Question: {question}
Answer: {answer}

Reply with just 'yes' or 'no'.
"""

    response = llm.invoke(eval_prompt)

    # BUGFIX: a bare substring test (`"no" in ...`) also matched words like
    # "know" or "not"; check the actual verdict token instead.
    if response.content.strip().lower().startswith("no"):
        state["needs_refinement"] = True

    return state


# Refinement node.
def refine(state: RAGState) -> RAGState:
    """Regenerate a better answer, using the previous attempt as extra context."""
    question = state["question"]
    previous_answer = state["answer"]
    docs = state["documents"]

    context = "\n\n".join([doc.page_content for doc in docs])

    refine_prompt = f"""
The previous answer was not satisfactory.

Context: {context}
Question: {question}
Previous Answer: {previous_answer}

Provide a better, more detailed answer:
"""

    response = llm.invoke(refine_prompt)
    state["answer"] = response.content
    state["needs_refinement"] = False
    return state


# Routing function.
def should_refine(state: RAGState) -> str:
    """Route to 'refine' when flagged, otherwise end the graph."""
    if state.get("needs_refinement", False):
        return "refine"
    return END

# 그래프 구성
# Assemble the self-checking RAG graph.
rag_workflow = StateGraph(RAGState)

rag_workflow.add_node("retrieve", retrieve)
rag_workflow.add_node("generate", generate)
rag_workflow.add_node("check_quality", check_quality)
rag_workflow.add_node("refine", refine)

rag_workflow.set_entry_point("retrieve")
rag_workflow.add_edge("retrieve", "generate")
rag_workflow.add_edge("generate", "check_quality")

rag_workflow.add_conditional_edges(
    "check_quality",
    should_refine,
    {
        "refine": "refine",
        END: END,
    },
)

rag_workflow.add_edge("refine", END)

rag_app = rag_workflow.compile()

# Run it.
result = rag_app.invoke({
    "question": "What are the key features discussed?",
    "documents": [],
    "answer": "",
    "needs_refinement": False,
})

print("Final Answer:", result["answer"])

5. Human-in-the-Loop 패턴

from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.graph import StateGraph, END

class ApprovalState(TypedDict):
    task: str       # task description from the user
    plan: str       # LLM-generated plan
    approved: bool  # set externally after human review
    result: str     # execution result


# Plan-creation node.
def create_plan(state: ApprovalState) -> ApprovalState:
    """Ask the LLM to draft a detailed plan for the task."""
    task = state["task"]

    prompt = f"Create a detailed plan for: {task}"
    response = llm.invoke(prompt)

    state["plan"] = response.content
    return state


# Approval-wait node.
def wait_for_approval(state: ApprovalState) -> ApprovalState:
    """Display the plan; in a real deployment the graph pauses here for human input."""
    print("\n=== Plan for Approval ===")
    print(state["plan"])
    print("\n========================")

    # Execution would be interrupted here to wait for the user.
    return state


# Execution node.
def execute_plan(state: ApprovalState) -> ApprovalState:
    """Execute the approved plan via the LLM and store the result."""
    plan = state["plan"]

    prompt = f"Execute this plan and provide results:\n{plan}"
    response = llm.invoke(prompt)

    state["result"] = response.content
    return state


# Approval-check router.
def check_approval(state: ApprovalState) -> str:
    """Route to execution only when the plan was approved."""
    if state.get("approved", False):
        return "execute"
    return END

# Graph with a checkpointer so execution can pause and resume.
# NOTE(review): in recent langgraph versions SqliteSaver.from_conn_string
# returns a context manager rather than a saver — confirm against the
# installed version.
memory = SqliteSaver.from_conn_string(":memory:")

workflow = StateGraph(ApprovalState)

workflow.add_node("create_plan", create_plan)
workflow.add_node("wait_approval", wait_for_approval)
workflow.add_node("execute", execute_plan)

workflow.set_entry_point("create_plan")
workflow.add_edge("create_plan", "wait_approval")

workflow.add_conditional_edges(
    "wait_approval",
    check_approval,
    {
        "execute": "execute",
        END: END,
    },
)

workflow.add_edge("execute", END)

# Enable checkpointing.
approval_app = workflow.compile(checkpointer=memory)

# Run.
config = {"configurable": {"thread_id": "1"}}

# Step 1: create the plan.
result = approval_app.invoke({
    "task": "Analyze quarterly sales data",
    "plan": "",
    "approved": False,
    "result": ""
}, config)

print("\nPlan created. Review and approve.")

# Step 2: continue after approval.
result = approval_app.invoke({
    **result,
    "approved": True
}, config)

print("\nExecution Result:", result["result"])

6. 멀티 에이전트 시스템

from langgraph.graph import StateGraph, END
from typing import TypedDict, Literal

class MultiAgentState(TypedDict):
    task: str             # the overall assignment
    research_output: str  # findings from the research agent
    writing_output: str   # draft from the writing agent
    review_output: str    # feedback from the review agent
    final_output: str     # finalized article
    current_step: str     # pipeline progress marker


# Research agent.
def research_agent(state: MultiAgentState) -> MultiAgentState:
    """Gather and analyze information about the task."""
    task = state["task"]

    prompt = f"""
As a research agent, gather and analyze information about: {task}
Provide key findings and data.
"""

    response = llm.invoke(prompt)
    state["research_output"] = response.content
    state["current_step"] = "research_done"
    return state


# Writing agent.
def writing_agent(state: MultiAgentState) -> MultiAgentState:
    """Turn the research findings into a structured article."""
    research = state["research_output"]

    prompt = f"""
As a writing agent, create a well-structured article based on this research:

{research}
"""

    response = llm.invoke(prompt)
    state["writing_output"] = response.content
    state["current_step"] = "writing_done"
    return state


# Review agent.
def review_agent(state: MultiAgentState) -> MultiAgentState:
    """Critique the draft and produce improvement feedback."""
    writing = state["writing_output"]

    prompt = f"""
As a review agent, critique this article and suggest improvements:

{writing}

Provide constructive feedback.
"""

    response = llm.invoke(prompt)
    state["review_output"] = response.content
    state["current_step"] = "review_done"
    return state


# Finalization agent.
def finalize_agent(state: MultiAgentState) -> MultiAgentState:
    """Apply the review feedback to produce the final article."""
    writing = state["writing_output"]
    review = state["review_output"]

    prompt = f"""
Based on the review feedback, finalize this article:

Original: {writing}

Feedback: {review}

Provide the final version.
"""

    response = llm.invoke(prompt)
    state["final_output"] = response.content
    state["current_step"] = "complete"
    return state

# Assemble the sequential multi-agent pipeline:
# research -> writing -> review -> finalize.
multi_agent_workflow = StateGraph(MultiAgentState)

multi_agent_workflow.add_node("research", research_agent)
multi_agent_workflow.add_node("writing", writing_agent)
multi_agent_workflow.add_node("review", review_agent)
multi_agent_workflow.add_node("finalize", finalize_agent)

multi_agent_workflow.set_entry_point("research")
multi_agent_workflow.add_edge("research", "writing")
multi_agent_workflow.add_edge("writing", "review")
multi_agent_workflow.add_edge("review", "finalize")
multi_agent_workflow.add_edge("finalize", END)

multi_agent_app = multi_agent_workflow.compile()

# Run the pipeline.
result = multi_agent_app.invoke({
    "task": "The impact of AI on software development",
    "research_output": "",
    "writing_output": "",
    "review_output": "",
    "final_output": "",
    "current_step": ""
})

print("=== Research ===")
print(result["research_output"])
print("\n=== Writing ===")
print(result["writing_output"])
print("\n=== Review ===")
print(result["review_output"])
print("\n=== Final Output ===")
print(result["final_output"])

7. 스트리밍과 비동기 처리

7.1 스트리밍

# Stream LLM tokens as they are generated.
for chunk in llm.stream("Tell me a long story"):
    print(chunk.content, end="", flush=True)

# Stream LangGraph events node by node.
for event in app.stream({"messages": [("user", "Hello")]}):
    print(event)

7.2 비동기 처리

import asyncio

async def async_agent_call():
    """Invoke the agent graph asynchronously via `ainvoke`."""
    result = await agent_app.ainvoke({
        "messages": [("user", "What's the weather?")]
    })
    return result

# Run the coroutine to completion.
result = asyncio.run(async_agent_call())
print(result)

8. 모니터링과 디버깅

8.1 LangSmith 통합

import os

# Enable LangSmith tracing for all LangChain calls in this process.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "your-langsmith-api-key"
os.environ["LANGCHAIN_PROJECT"] = "my-project"

# From here on, every call is recorded in LangSmith.
result = chain.invoke({"question": "Test question"})

8.2 로깅

import logging

# Enable verbose LangChain logging.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("langchain").setLevel(logging.DEBUG)

# Detailed logs are emitted during execution.
result = chain.invoke({"question": "Test"})

8.3 그래프 시각화

from IPython.display import Image, display

# Render the compiled graph inline (Jupyter Notebook).
display(Image(app.get_graph().draw_mermaid_png()))

# Or emit the Mermaid source text.
print(app.get_graph().draw_mermaid())

9. 베스트 프랙티스

9.1 에러 핸들링

from langchain.callbacks.base import BaseCallbackHandler

class ErrorHandler(BaseCallbackHandler):
    """Callback handler that logs LLM and chain errors to stdout."""

    def on_llm_error(self, error: Exception, **kwargs):
        print(f"LLM Error: {error}")

    def on_chain_error(self, error: Exception, **kwargs):
        print(f"Chain Error: {error}")

# Attach the callback to a chain.
chain = LLMChain(
    llm=llm,
    prompt=prompt,
    callbacks=[ErrorHandler()],
)

# try/except pattern for fault-tolerant graph nodes.
def safe_node(state):
    """Run node logic, recording any failure in state["error"] instead of raising."""
    try:
        # processing logic goes here
        return state
    except Exception as e:
        state["error"] = str(e)
        return state

9.2 프롬프트 관리

# Manage prompts as files on disk.
from langchain.prompts import load_prompt

prompt = load_prompt("prompts/qa_prompt.yaml")

# Or pull a shared prompt from the LangChain hub.
from langchain import hub

prompt = hub.pull("rlm/rag-prompt")

9.3 캐싱

from langchain.cache import InMemoryCache
from langchain.globals import set_llm_cache

# Enable in-process LLM response caching.
set_llm_cache(InMemoryCache())

# Identical prompts are served from the cache.
result1 = llm.invoke("What is 2+2?")
result2 = llm.invoke("What is 2+2?")  # served from cache

9.4 비용 최적화

from langchain.callbacks import get_openai_callback

# Track token usage and cost for everything inside the context.
with get_openai_callback() as cb:
    result = chain.invoke({"question": "Test"})
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Total Cost: ${cb.total_cost}")

10. 프로덕션 배포

10.1 LangServe로 API 서버 구축

from fastapi import FastAPI
from langserve import add_routes

app = FastAPI(
    title="LangChain Server",
    version="1.0",
)

# Expose the chain as a REST endpoint at /chain.
add_routes(
    app,
    chain,
    path="/chain",
)

# Run with:
# uvicorn main:app --reload

10.2 Docker 배포

# Dockerfile
FROM python:3.11-slim

WORKDIR /app

# Install dependencies first so this layer is cached across code changes.
COPY requirements.txt .
RUN pip install -r requirements.txt

COPY . .

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
# docker-compose.yml
version: '3.8'

services:
  langchain-app:
    build: .
    ports:
      - "8000:8000"
    environment:
      # Passed through from the host environment / .env file.
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    volumes:
      # Persist the Chroma vector store across container restarts.
      - ./chroma_db:/app/chroma_db

10.3 환경별 설정

# config.py
from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    """Application settings, loaded from the environment / .env file."""

    openai_api_key: str  # required — no default
    model_name: str = "gpt-4"
    temperature: float = 0.7
    max_tokens: int = 1000

    class Config:
        env_file = ".env"

settings = Settings()

# Usage: build the LLM from settings.
llm = ChatOpenAI(
    model=settings.model_name,
    temperature=settings.temperature,
    max_tokens=settings.max_tokens,
)

11. 고급 패턴

11.1 Self-RAG (자체 수정 RAG)

class SelfRAGState(TypedDict):
    question: str
    documents: list
    answer: str
    relevance_score: float  # graded relevance of the retrieved documents
    iteration: int          # retrieval/answer loop counter


def retrieve_with_grading(state: SelfRAGState) -> SelfRAGState:
    """Retrieve documents and grade their relevance to the question."""
    # Retrieve candidate documents.
    docs = vectorstore.similarity_search(state["question"], k=5)

    # Relevance scoring (placeholder — the grading LLM call is elided).
    grader_prompt = f"Rate the relevance of these documents to the question: {state['question']}"
    # ... grading logic

    state["documents"] = docs
    state["relevance_score"] = 0.8  # example value
    return state

11.2 Plan-and-Execute

def plan(state):
    """Draft an overall step-by-step plan for answering the question in state."""
    prompt = f"Create a step-by-step plan to answer: {state['question']}"
    plan = llm.invoke(prompt)
    state["plan"] = plan.content
    return state


def execute_step(state):
    """Execute the current plan step (placeholder implementation)."""
    current_step = state["current_step"]
    # ... execution logic
    return state