LangGraph는 LangChain을 기반으로 상태를 가진(stateful) 멀티 액터 애플리케이션을 그래프로 구축할 수 있는 라이브러리입니다. LangChain이 단순한 체인 구조라면, LangGraph는 복잡한 순환 구조와 조건부 분기를 지원하는 강력한 워크플로우 엔진입니다.
LangGraph의 탄생 배경
LangChain의 체인 방식은 선형적인 워크플로우에는 적합하지만, 다음과 같은 한계가 있었습니다:
순환(cycle) 구조를 구현하기 어려움
복잡한 조건부 분기 처리 한계
상태 관리의 어려움
Human-in-the-loop 구현의 복잡성
LangGraph는 이러한 한계를 극복하기 위해 만들어졌습니다.
LangGraph의 핵심 개념
```
        ┌─────────┐
        │  START  │
        └────┬────┘
             │
        ┌────▼────┐
        │ Node A  │
        └────┬────┘
             │
     ┌───────┴───────┐
     │               │
 ┌───▼───┐       ┌───▼───┐
 │Node B │       │Node C │
 └───┬───┘       └───┬───┘
     │               │
     └───────┬───────┘
             │
        ┌────▼────┐
        │   END   │
        └─────────┘
```
import operator
from typing import Annotated, TypedDict

from langchain.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
# Tool definitions
@tool
def search(query: str) -> str:
    """Search for information."""
    # In a real application this would call a search API.
    return f"Search results for: {query}"
@tool
def calculator(expression: str) -> str:
    """Calculate a mathematical expression.

    Returns a "Result: ..." string on success, or an "Error: ..." string
    if evaluation fails.
    """
    # SECURITY: eval() on arbitrary input is dangerous. If this tool can
    # receive untrusted input, replace eval with ast.literal_eval or a
    # dedicated expression parser.
    try:
        result = eval(expression)
        return f"Result: {result}"
    except Exception as e:
        return f"Error: {str(e)}"
# State definition shared by the agent graph.
class AgentState(TypedDict):
    # Annotated with operator.add so LangGraph appends (rather than
    # replaces) messages when merging node updates.
    messages: Annotated[list, operator.add]
    # Name of the next action/node to run.
    next_action: str
# 도구 실행 tool_messages = [] for tool_call in tool_calls: tool = next((t for t in tools if t.name == tool_call["name"]), None) if tool: result = tool.invoke(tool_call["args"]) tool_messages.append({ "role": "tool", "content": result, "tool_call_id": tool_call["id"] })
return {"messages": tool_messages}
# Decide whether to keep calling tools or finish.
def should_continue(state: "AgentState") -> str:
    """Route to 'tools' while the last message still requests tool calls.

    Returns "tools" if the most recent message carries a non-empty
    tool_calls attribute, otherwise "end".
    """
    messages = state["messages"]
    last_message = messages[-1]
    if hasattr(last_message, "tool_calls") and last_message.tool_calls:
        return "tools"
    return "end"
def critique_draft(state: "ReflectionState"):
    """Critique the current draft."""
    prompt = f"""
    Review this draft and provide constructive criticism:
    {state['draft']}
    Is this good enough? If not, what should be improved?
    """
    # NOTE(review): the LLM call and return statement were lost in
    # extraction; restored to match the pattern used by the other node
    # functions in this article — confirm against the original source.
    response = llm.invoke(prompt)
    return {"critique": response.content}
# 비평에 "good enough" 또는 "acceptable" 포함 시 종료 ifany(word in state["critique"].lower() for word in ["good enough", "acceptable", "approved"]): return"end"
return"revise"
def revise_draft(state: "ReflectionState"):
    """Revise the draft based on the critique."""
    prompt = f"""
    Original draft: {state['draft']}
    Critique: {state['critique']}
    Please revise the draft based on the critique.
    """
    # NOTE(review): the LLM call and return statement were lost in
    # extraction; restored to match the pattern used by the other node
    # functions (the iteration bump is presumed from the state schema) —
    # confirm against the original source.
    response = llm.invoke(prompt)
    return {"draft": response.content, "iteration": state["iteration"] + 1}
# Run the reflection graph.
result = reflection_app.invoke({
    "messages": [{"role": "user", "content": "Write a blog post about AI safety"}],
    "draft": "",
    "critique": "",
    "iteration": 0,
    "max_iterations": 3
})
# Research agent
def research_agent(state: "MultiAgentState"):
    """Gather and analyze information about the task."""
    prompt = f"""
    As a research specialist, gather and analyze information about:
    {state['task']}
    Provide key findings and data points.
    """
    response = llm.invoke(prompt)
    return {
        "research_output": response.content,
        "current_agent": "research"
    }
# Writing agent
def writing_agent(state: "MultiAgentState"):
    """Write a document based on the research findings."""
    prompt = f"""
    As a professional writer, create a well-structured article based on:
    Research Findings: {state['research_output']}
    Original Task: {state['task']}
    """
    response = llm.invoke(prompt)
    return {
        "writing_output": response.content,
        "current_agent": "writing"
    }
# Review agent
def review_agent(state: "MultiAgentState"):
    """Review the written article and produce feedback."""
    prompt = f"""
    As an editor, review this article:
    {state['writing_output']}
    Provide constructive feedback and suggestions.
    """
    response = llm.invoke(prompt)
    return {
        "review_output": response.content,
        "current_agent": "review"
    }
# Finalize agent
def finalize_agent(state: "MultiAgentState"):
    """Produce the final version incorporating the review feedback."""
    prompt = f"""
    Finalize this article based on the review:
    Original Article: {state['writing_output']}
    Review Feedback: {state['review_output']}
    """
    response = llm.invoke(prompt)
    return {
        "final_output": response.content,
        "current_agent": "finalize"
    }
# Build the multi-agent graph.
# NOTE(review): the add_node/add_edge/compile calls for this workflow are
# not shown in this excerpt.
multi_agent_workflow = StateGraph(MultiAgentState)
# Run the multi-agent pipeline.
result = multi_agent_app.invoke({
    "task": "The impact of artificial intelligence on software development",
    "research_output": "",
    "writing_output": "",
    "review_output": "",
    "final_output": "",
    "current_agent": ""
})

print("=== Research Output ===")
print(result["research_output"])
print("\n=== Writing Output ===")
print(result["writing_output"])
print("\n=== Review Output ===")
print(result["review_output"])
print("\n=== Final Output ===")
print(result["final_output"])
5.2 협업 에이전트 시스템
여러 전문가 에이전트가 협업하여 문제를 해결합니다.
from typing import Literal
class CollaborationState(TypedDict):
    # messages accumulates across turns via operator.add.
    messages: Annotated[list, operator.add]
    # Which expert speaks next.
    next_speaker: str
    # Turn counter, compared against max_conversations to end the session.
    conversation_count: int
    max_conversations: int
# Expert agents
def python_expert(state: "CollaborationState"):
    """Python expert's contribution to the conversation."""
    prompt = f"""
    You are a Python expert. Based on the conversation, provide your insights.
    Conversation so far: {state['messages']}
    """
    response = llm.invoke(prompt)
    return {
        "messages": [{"role": "python_expert", "content": response.content}],
        "conversation_count": state["conversation_count"] + 1
    }
def architecture_expert(state: "CollaborationState"):
    """Software-architecture expert's contribution to the conversation."""
    prompt = f"""
    You are a software architecture expert. Provide architectural insights.
    Conversation: {state['messages']}
    """
    response = llm.invoke(prompt)
    return {
        "messages": [{"role": "architect", "content": response.content}],
        "conversation_count": state["conversation_count"] + 1
    }
# After each agent speaks, pick the next speaker (or end).
for node in ["python", "architect", "security"]:
    collab_workflow.add_conditional_edges(
        node,
        select_next_speaker,
        {
            "python": "python",
            "architect": "architect",
            "security": "security",
            "end": END
        }
    )
collab_app = collab_workflow.compile()

# Run the collaboration.
result = collab_app.invoke({
    "messages": [{"role": "user", "content": "How should we design a scalable authentication system?"}],
    "next_speaker": "",
    "conversation_count": 0,
    "max_conversations": 6
})

# Print the transcript; entries may be plain dicts, so use .get defaults.
for msg in result["messages"]:
    print(f"\n[{msg.get('role', 'unknown')}]")
    print(msg.get('content', ''))
6. Human-in-the-Loop
6.1 체크포인트를 활용한 승인 워크플로우
from langgraph.checkpoint.sqlite import SqliteSaver
def revise_plan(state: "ApprovalState"):
    """Revise the plan and clear the revision flag."""
    prompt = f"Revise this plan to make it better:\n{state['plan']}"
    response = llm.invoke(prompt)
    return {
        "plan": response.content,
        "needs_revision": False
    }
# Checkpointer backed by an in-memory SQLite database.
# NOTE(review): in recent langgraph versions, SqliteSaver.from_conn_string
# returns a context manager rather than a saver — confirm against the
# installed version.
memory = SqliteSaver.from_conn_string(":memory:")

# Compile with the checkpointer so runs can pause and resume per thread.
approval_app = workflow.compile(checkpointer=memory)

# Usage example: every invocation shares the same thread_id so state
# persists across the three steps below.
config = {"configurable": {"thread_id": "1"}}

# Step 1: create the plan.
result = approval_app.invoke({
    "task": "Implement a user authentication system",
    "plan": "",
    "approved": False,
    "result": "",
    "needs_revision": False
}, config)

print("\nPlan created. Review the plan above.")

# Step 2: request a revision.
result = approval_app.invoke({
    **result,
    "needs_revision": True
}, config)

print("\nRevised plan created.")

# Step 3: approve and execute.
result = approval_app.invoke({
    **result,
    "approved": True
}, config)

print("\n=== Execution Result ===")
print(result["result"])
6.2 인터럽트를 활용한 Human-in-the-Loop
from langgraph.graph import StateGraph
from langgraph.checkpoint.memory import MemorySaver
def interrupt_before_execution(state):
    """Breakpoint node: execution is interrupted before this node runs.

    The interrupt itself is configured at compile time (interrupt_before);
    this node just passes state through unchanged.
    """
    return state
# Build the graph.
workflow = StateGraph(ApprovalState)
workflow.add_node("plan", create_plan)
workflow.add_node("execute", execute_plan)
def planner(state: "PlanExecuteState"):
    """Create the overall step-by-step plan."""
    prompt = f"""
    Create a step-by-step plan to answer: {state['input']}
    Return as a numbered list.
    """
    response = llm.invoke(prompt)
    # Parse the numbered list into a plan of non-empty steps.
    plan = [line.strip() for line in response.content.split('\n') if line.strip()]
    return {"plan": plan, "current_step": 0}
def finalizer(state: "PlanExecuteState"):
    """Generate the final answer from the per-step results."""
    prompt = f"""
    Based on these results: {state['results']}
    Provide a final answer to: {state['input']}
    """
    response = llm.invoke(prompt)
    return {"final_answer": response.content}
# Build the plan-and-execute graph.
plan_execute_workflow = StateGraph(PlanExecuteState)
# Run the plan-and-execute pipeline.
result = plan_execute_app.invoke({
    "input": "What are the key differences between Python and JavaScript?",
    "plan": [],
    "current_step": 0,
    "results": [],
    "final_answer": ""
})

print("=== Plan ===")
for i, step in enumerate(result["plan"], 1):
    print(f"{i}. {step}")

print("\n=== Results ===")
for i, res in enumerate(result["results"], 1):
    print(f"Step {i}: {res}")

print("\n=== Final Answer ===")
print(result["final_answer"])
10.2 Self-RAG (Self-Reflective RAG)
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
class SelfRAGState(TypedDict):
    question: str
    # Retrieved documents for the current iteration.
    documents: list
    answer: str
    # 0-1 relevance grade produced by grade_documents.
    relevance_score: float
    # Retry counter, bounded by max_iterations.
    iteration: int
    max_iterations: int
# Vector store (assumed to be pre-populated elsewhere).
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
def retrieve(state: "SelfRAGState"):
    """Retrieve documents relevant to the question."""
    # Widen the search on each retry: 3, 5, 7, ... documents.
    k = 3 + (state.get("iteration", 0) * 2)
    docs = vectorstore.similarity_search(state["question"], k=k)
    return {"documents": docs}
def grade_documents(state: "SelfRAGState"):
    """Grade how relevant the retrieved documents are to the question."""
    prompt = f"""
    Rate the relevance of these documents to the question on a scale of 0-1:
    Question: {state['question']}
    Documents: {[doc.page_content for doc in state['documents']]}
    Return only a number between 0 and 1.
    """
    response = llm.invoke(prompt)
    try:
        score = float(response.content.strip())
    except ValueError:
        # The LLM returned something non-numeric; fall back to a neutral
        # score rather than failing the run.
        score = 0.5
    return {"relevance_score": score}
def generate_answer(state: "SelfRAGState"):
    """Generate an answer grounded in the retrieved documents."""
    context = "\n\n".join([doc.page_content for doc in state["documents"]])
    prompt = f"""
    Answer the question based on this context:
    Context: {context}
    Question: {state['question']}
    """
    response = llm.invoke(prompt)
    return {
        "answer": response.content,
        "iteration": state.get("iteration", 0) + 1
    }
def should_retry(state: "SelfRAGState") -> str:
    """Decide whether to retry retrieval or finish.

    Retries (returns "retrieve") while the relevance grade is below 0.7
    and the iteration budget is not exhausted; otherwise returns "end".
    """
    if state["relevance_score"] < 0.7 and state["iteration"] < state["max_iterations"]:
        return "retrieve"  # fetch more documents on the next pass
    return "end"
# Build the Self-RAG graph.
self_rag_workflow = StateGraph(SelfRAGState)