Getting Started with LangGraph
LangGraph is the low-level agent orchestration framework from the LangChain team. It uses a graph abstraction to build complex, stateful AI workflows.
💡 How LangGraph Relates to LangChain Agents
LangChain agents are built on top of LangGraph, which means that when you create an agent with create_agent, LangGraph is what drives it under the hood.
- Simple scenarios: use create_agent directly; no LangGraph knowledge required
- Complex scenarios: use LangGraph when you need custom workflows or fine-grained control
Official recommendation:
If you just want to build agents and autonomous applications quickly, use LangChain. When you have more advanced needs, such as combining deterministic and agentic workflows, deep customization, or precise control over latency, use LangGraph.
Why LangGraph
Limitations of traditional agents:
| Problem | Description |
|---|---|
| Limited control flow | Hard to express complex conditional branches and loops |
| State management | No clear mechanism for managing state |
| Observability | Hard to trace the execution flow |
| Human-in-the-loop | Hard to insert human review mid-execution |
LangGraph addresses these problems with a graph structure.
Core Advantages of LangGraph
LangChain agents can offer the following features precisely because they are built on LangGraph:
| Feature | Description |
|---|---|
| Durable execution | Execution state can be resumed even after an interruption |
| Streaming | Intermediate results are returned in real time |
| Human-in-the-loop | Human review can be inserted at key nodes |
| State persistence | Conversation/task state can be saved and restored |
| Visualization | The workflow can be rendered as a graph |
Core Concepts
State
The data that flows through the graph:
from typing import TypedDict, Annotated
from operator import add
class AgentState(TypedDict):
    messages: Annotated[list, add]  # message list; new items are appended
    current_step: str               # current step
    result: str                     # final result
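The second element of Annotated is a reducer that controls how node return values are merged into the state: keys without a reducer are simply overwritten, while a key annotated with operator.add accumulates. The sketch below is purely illustrative (the node names are made up for this example) but shows the behavior you get from the AgentState above:
def node_a(state: AgentState) -> dict:
    # returns a partial update; "messages" is appended to, "current_step" is overwritten
    return {"messages": ["output from node_a"], "current_step": "a"}
def node_b(state: AgentState) -> dict:
    return {"messages": ["output from node_b"], "current_step": "b"}
# After running node_a and then node_b in a graph built on AgentState:
#   messages     == ["output from node_a", "output from node_b"]  (accumulated by add)
#   current_step == "b"                                           (last write wins)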
Node
A function that processes the state:
def process_node(state: AgentState) -> AgentState:
    # process the state and return an update
    return {"current_step": "processed"}
Edge
The connection between nodes. An edge can be:
- A normal edge - a fixed next step
- A conditional edge - the next step is chosen based on the state
Both kinds appear in the examples below.
Installing LangGraph
pip install langgraph
The LLM examples in this chapter additionally use langchain-openai and python-dotenv; install them the same way if you want to run those examples.
Creating Your First Graph
from langgraph.graph import StateGraph, END
from typing import TypedDict
# 1. Define the state
class State(TypedDict):
    input: str
    output: str
# 2. Define the node functions
def step1(state: State) -> State:
    print("Running step 1")
    return {"output": f"Processed: {state['input']}"}
def step2(state: State) -> State:
    print("Running step 2")
    return {"output": state["output"] + " -> done"}
# 3. Create the graph
graph = StateGraph(State)
# 4. Add nodes
graph.add_node("step1", step1)
graph.add_node("step2", step2)
# 5. Add edges
graph.set_entry_point("step1")    # set the entry point
graph.add_edge("step1", "step2")  # step1 -> step2
graph.add_edge("step2", END)      # step2 -> finish
# 6. Compile
app = graph.compile()
# 7. Run
result = app.invoke({"input": "Hello"})
print(result)
# {'input': 'Hello', 'output': 'Processed: Hello -> done'}
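Two of the features from the advantages table can already be seen on this tiny graph. The snippet below is an optional sketch on the app compiled above: stream() yields a state snapshot after each node instead of only the final result, and get_graph().draw_mermaid() returns a Mermaid description of the workflow (rendering it is up to you).
# Stream intermediate state after each node
for chunk in app.stream({"input": "Hello"}, stream_mode="values"):
    print(chunk)
# Get a Mermaid diagram of the graph for visualization
print(app.get_graph().draw_mermaid())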
Conditional Branching
from langgraph.graph import StateGraph, END
from typing import TypedDict, Literal
class State(TypedDict):
    input: str
    category: str
    output: str
def classify(state: State) -> State:
    """Classification node"""
    text = state["input"].lower()
    if "code" in text or "python" in text:
        category = "code"
    elif "math" in text or "calculate" in text:
        category = "math"
    else:
        category = "general"
    return {"category": category}
def handle_code(state: State) -> State:
    return {"output": f"[Code assistant] Handling: {state['input']}"}
def handle_math(state: State) -> State:
    return {"output": f"[Math assistant] Handling: {state['input']}"}
def handle_general(state: State) -> State:
    return {"output": f"[General assistant] Handling: {state['input']}"}
# Routing function
def route(state: State) -> Literal["code", "math", "general"]:
    return state["category"]
# Create the graph
graph = StateGraph(State)
# Add nodes
graph.add_node("classify", classify)
graph.add_node("code", handle_code)
graph.add_node("math", handle_math)
graph.add_node("general", handle_general)
# Set the entry point
graph.set_entry_point("classify")
# Add conditional edges
graph.add_conditional_edges(
    "classify",
    route,
    {
        "code": "code",
        "math": "math",
        "general": "general"
    }
)
# All handler nodes lead to END
graph.add_edge("code", END)
graph.add_edge("math", END)
graph.add_edge("general", END)
# Compile and run
app = graph.compile()
print(app.invoke({"input": "Write some Python code"}))
print(app.invoke({"input": "Calculate what 1+1 equals"}))
print(app.invoke({"input": "What's the weather like today"}))
Loops
The defining feature of an agent is that it can run in a loop:
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated, Literal
from operator import add
class AgentState(TypedDict):
    messages: Annotated[list, add]
    iteration: int
def agent(state: AgentState) -> AgentState:
    """Agent node - decides what to do next"""
    iteration = state.get("iteration", 0) + 1
    if iteration >= 3:
        return {
            "messages": [f"Iteration {iteration}: task complete"],
            "iteration": iteration
        }
    else:
        return {
            "messages": [f"Iteration {iteration}: keep going"],
            "iteration": iteration
        }
def should_continue(state: AgentState) -> Literal["continue", "end"]:
    """Decide whether to continue"""
    if state["iteration"] >= 3:
        return "end"
    return "continue"
# Create the graph
graph = StateGraph(AgentState)
graph.add_node("agent", agent)
graph.set_entry_point("agent")
# Conditional edge: continue or finish
graph.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue": "agent",  # loop back to agent
        "end": END
    }
)
app = graph.compile()
result = app.invoke({"messages": [], "iteration": 0})
print("\n".join(result["messages"]))
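A loop like this could in principle run forever if the stop condition is never met. LangGraph guards against that with a recursion limit (25 super-steps by default) and raises GraphRecursionError when it is exceeded. A minimal sketch of tightening the limit for a single run via the config argument:
# Cap the number of super-steps for this run; exceeding it raises GraphRecursionError
result = app.invoke(
    {"messages": [], "iteration": 0},
    config={"recursion_limit": 10}
)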
Building an Agent with an LLM
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_core.tools import tool
from typing import TypedDict, Annotated, Literal
from operator import add
from dotenv import load_dotenv
load_dotenv()
# Define tools
@tool
def search(query: str) -> str:
    """Search for information"""
    return f"Results for '{query}': found some relevant information..."
@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression"""
    try:
        return f"Result: {eval(expression)}"
    except Exception:
        return "Calculation error"
tools = [search, calculator]
tool_map = {t.name: t for t in tools}
# Create the LLM
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
llm_with_tools = llm.bind_tools(tools)
# Define the state
class AgentState(TypedDict):
    messages: Annotated[list, add]
# Agent node
def agent_node(state: AgentState) -> AgentState:
    response = llm_with_tools.invoke(state["messages"])
    return {"messages": [response]}
# Tool execution node
def tool_node(state: AgentState) -> AgentState:
    last_message = state["messages"][-1]
    tool_messages = []
    for tool_call in last_message.tool_calls:
        selected_tool = tool_map[tool_call["name"]]
        result = selected_tool.invoke(tool_call["args"])
        tool_messages.append(ToolMessage(
            content=str(result),
            tool_call_id=tool_call["id"]
        ))
    return {"messages": tool_messages}
# Routing function
def should_continue(state: AgentState) -> Literal["tools", "end"]:
    last_message = state["messages"][-1]
    if hasattr(last_message, "tool_calls") and last_message.tool_calls:
        return "tools"
    return "end"
# Create the graph
graph = StateGraph(AgentState)
graph.add_node("agent", agent_node)
graph.add_node("tools", tool_node)
graph.set_entry_point("agent")
graph.add_conditional_edges(
    "agent",
    should_continue,
    {
        "tools": "tools",
        "end": END
    }
)
graph.add_edge("tools", "agent")  # return to the agent after the tools run
app = graph.compile()
# Run
result = app.invoke({
    "messages": [HumanMessage(content="Search for the creator of Python, then calculate 2024 - 1991")]
})
for msg in result["messages"]:
    if isinstance(msg, AIMessage) and msg.content:
        print(f"AI: {msg.content}")
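When the agent makes several tool calls, it is often more useful to watch the intermediate steps than to wait for the final answer. A minimal sketch using the stream API on the app compiled above:
# Stream full state snapshots after each node and print the newest message
for step in app.stream(
    {"messages": [HumanMessage(content="Search for the creator of Python, then calculate 2024 - 1991")]},
    stream_mode="values",
):
    step["messages"][-1].pretty_print()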
Human-in-the-Loop
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from typing import TypedDict, Literal
class State(TypedDict):
    task: str
    plan: str
    approved: bool
    result: str
def create_plan(state: State) -> State:
    """Create a plan"""
    return {"plan": f"Plan for the task: {state['task']}"}
def execute_plan(state: State) -> State:
    """Execute the plan"""
    return {"result": f"Completed: {state['plan']}"}
def check_approval(state: State) -> Literal["execute", "wait"]:
    """Check whether the plan is approved"""
    if state.get("approved"):
        return "execute"
    return "wait"
# Create the graph
graph = StateGraph(State)
graph.add_node("plan", create_plan)
graph.add_node("execute", execute_plan)
graph.set_entry_point("plan")
# Human review is required after planning
graph.add_conditional_edges(
    "plan",
    check_approval,
    {
        "execute": "execute",
        "wait": END  # stop and wait for human review
    }
)
graph.add_edge("execute", END)
# Use a checkpointer to persist state
memory = MemorySaver()
app = graph.compile(checkpointer=memory)
# First run - create the plan and stop
config = {"configurable": {"thread_id": "task-1"}}
result = app.invoke({"task": "Send an email", "approved": False}, config)
print(f"Plan created: {result['plan']}")
print("Waiting for human review...")
# After human review, update the state and run again on the same thread
result = app.invoke({"approved": True}, config)
print(f"Execution result: {result.get('result', 'pending')}")
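The example above models "waiting" by routing to END and starting a second run on the same thread. LangGraph also has built-in support for pausing a graph: compile with interrupt_before, inspect (and optionally edit) the checkpointed state, then resume by invoking with None. A hedged sketch of that variant, reusing the graph and memory objects defined above:
# Pause automatically before the "execute" node instead of routing to END
app2 = graph.compile(checkpointer=memory, interrupt_before=["execute"])
config2 = {"configurable": {"thread_id": "task-2"}}
app2.invoke({"task": "Send an email", "approved": True}, config2)  # stops before "execute"
# A human reviews the plan, optionally edits the state, then resumes
print(app2.get_state(config2).values["plan"])
app2.update_state(config2, {"approved": True})
result = app2.invoke(None, config2)  # passing None resumes from the checkpoint
print(result["result"])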
Subgraphs
Complex logic can be packaged as a subgraph:
from langgraph.graph import StateGraph, END
from typing import TypedDict
class SubState(TypedDict):
    data: str
    processed: bool
class MainState(TypedDict):
    input: str
    sub_result: str
    output: str
# Create the subgraph
def create_subgraph():
    def process_a(state: SubState) -> SubState:
        return {"data": state["data"] + " -> A"}
    def process_b(state: SubState) -> SubState:
        return {"data": state["data"] + " -> B", "processed": True}
    subgraph = StateGraph(SubState)
    subgraph.add_node("a", process_a)
    subgraph.add_node("b", process_b)
    subgraph.set_entry_point("a")
    subgraph.add_edge("a", "b")
    subgraph.add_edge("b", END)
    return subgraph.compile()
# Main graph
def prepare(state: MainState) -> MainState:
    return {"sub_result": ""}
def run_subgraph(state: MainState) -> MainState:
    subgraph = create_subgraph()
    result = subgraph.invoke({"data": state["input"], "processed": False})
    return {"sub_result": result["data"]}
def finalize(state: MainState) -> MainState:
    return {"output": f"Final result: {state['sub_result']}"}
graph = StateGraph(MainState)
graph.add_node("prepare", prepare)
graph.add_node("subgraph", run_subgraph)
graph.add_node("finalize", finalize)
graph.set_entry_point("prepare")
graph.add_edge("prepare", "subgraph")
graph.add_edge("subgraph", "finalize")
graph.add_edge("finalize", END)
app = graph.compile()
result = app.invoke({"input": "start"})
print(result["output"])  # Final result: start -> A -> B
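Here the subgraph is invoked manually inside a node function, which is handy when the parent and child state schemas differ and you need to translate between them. When the two schemas share keys, a compiled graph can also be added directly as a node. A small sketch of that alternative, reusing the imports above (SharedState and the node names are made up for illustration):
# When parent and subgraph share state keys, the compiled graph can be the node itself
class SharedState(TypedDict):
    data: str
def child_step(state: SharedState) -> SharedState:
    return {"data": state["data"] + " -> child"}
child = StateGraph(SharedState)
child.add_node("child_step", child_step)
child.set_entry_point("child_step")
child.add_edge("child_step", END)
parent = StateGraph(SharedState)
parent.add_node("child", child.compile())  # the compiled subgraph is the node
parent.set_entry_point("child")
parent.add_edge("child", END)
print(parent.compile().invoke({"data": "start"}))  # {'data': 'start -> child'}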
Complete Example: A Customer Service Bot
"""
Customer service bot - LangGraph example
"""
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from typing import TypedDict, Annotated, Literal
from operator import add
from dotenv import load_dotenv
load_dotenv()
# State definition
class CustomerServiceState(TypedDict):
    messages: Annotated[list, add]
    intent: str
    sentiment: str
    resolved: bool
    transfer_to_human: bool
# LLM
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
# Node functions
def analyze_intent(state: CustomerServiceState) -> CustomerServiceState:
    """Analyze the customer's intent"""
    last_message = state["messages"][-1].content
    prompt = f"""Classify the intent of the following customer message:
Message: {last_message}
Intent categories (return the category name only):
- inquiry: asking about products or services
- complaint: expressing dissatisfaction or a problem
- refund: requesting a refund or return
- other: anything else
Intent:"""
    response = llm.invoke([HumanMessage(content=prompt)])
    intent = response.content.strip().split(":")[0].lower()
    return {"intent": intent}
def analyze_sentiment(state: CustomerServiceState) -> CustomerServiceState:
    """Analyze the customer's sentiment"""
    last_message = state["messages"][-1].content
    prompt = f"""Classify the sentiment of the following message:
Message: {last_message}
Sentiment (return exactly one word): positive/neutral/negative
Sentiment:"""
    response = llm.invoke([HumanMessage(content=prompt)])
    sentiment = response.content.strip().lower()
    return {"sentiment": sentiment}
def handle_inquiry(state: CustomerServiceState) -> CustomerServiceState:
    """Handle an inquiry"""
    messages = [
        SystemMessage(content="You are a professional customer service agent. Answer the customer's questions in a friendly way."),
        *state["messages"]
    ]
    response = llm.invoke(messages)
    return {
        "messages": [AIMessage(content=response.content)],
        "resolved": True
    }
def handle_complaint(state: CustomerServiceState) -> CustomerServiceState:
    """Handle a complaint"""
    sentiment = state.get("sentiment", "neutral")
    if "negative" in sentiment:
        # The customer is upset - escalate to a human agent
        return {
            "messages": [AIMessage(content="We're very sorry about your experience. I'm transferring you to a human agent now, please hold on.")],
            "transfer_to_human": True,
            "resolved": False
        }
    messages = [
        SystemMessage(content="You are a professional customer service agent. Handle the complaint patiently, show understanding, and offer a solution."),
        *state["messages"]
    ]
    response = llm.invoke(messages)
    return {
        "messages": [AIMessage(content=response.content)],
        "resolved": True
    }
def handle_refund(state: CustomerServiceState) -> CustomerServiceState:
    """Handle a refund request"""
    return {
        "messages": [AIMessage(content="""Thanks for reaching out. To process your refund, please provide:
1. Your order number
2. The reason for the refund
3. The purchase date
We will handle it as soon as possible.""")],
        "resolved": True
    }
def handle_other(state: CustomerServiceState) -> CustomerServiceState:
    """Handle everything else"""
    messages = [
        SystemMessage(content="You are a friendly customer service assistant who helps users with all kinds of questions."),
        *state["messages"]
    ]
    response = llm.invoke(messages)
    return {
        "messages": [AIMessage(content=response.content)],
        "resolved": True
    }
def transfer_human(state: CustomerServiceState) -> CustomerServiceState:
    """Transfer to a human agent"""
    return {
        "messages": [AIMessage(content="Transferring you to a human agent, please hold...")],
        "transfer_to_human": True
    }
# Routing functions
def route_by_intent(state: CustomerServiceState) -> str:
    intent = state.get("intent", "other")
    if "inquiry" in intent:
        return "inquiry"
    elif "complaint" in intent:
        return "complaint"
    elif "refund" in intent:
        return "refund"
    else:
        return "other"
def check_transfer(state: CustomerServiceState) -> Literal["transfer", "end"]:
    if state.get("transfer_to_human"):
        return "transfer"
    return "end"
# Build the graph
def create_customer_service_graph():
    graph = StateGraph(CustomerServiceState)
    # Add nodes
    graph.add_node("analyze_intent", analyze_intent)
    graph.add_node("analyze_sentiment", analyze_sentiment)
    graph.add_node("inquiry", handle_inquiry)
    graph.add_node("complaint", handle_complaint)
    graph.add_node("refund", handle_refund)
    graph.add_node("other", handle_other)
    graph.add_node("transfer", transfer_human)
    # Entry point and edges
    graph.set_entry_point("analyze_intent")
    graph.add_edge("analyze_intent", "analyze_sentiment")
    # Route by intent
    graph.add_conditional_edges(
        "analyze_sentiment",
        route_by_intent,
        {
            "inquiry": "inquiry",
            "complaint": "complaint",
            "refund": "refund",
            "other": "other"
        }
    )
    # Check whether a human handoff is needed
    for node in ["inquiry", "complaint", "refund", "other"]:
        graph.add_conditional_edges(
            node,
            check_transfer,
            {
                "transfer": "transfer",
                "end": END
            }
        )
    graph.add_edge("transfer", END)
    return graph.compile()
def main():
    app = create_customer_service_graph()
    print("=" * 50)
    print(" Customer Service Bot (LangGraph)")
    print(" Type 'quit' to exit")
    print("=" * 50)
    while True:
        user_input = input("\nCustomer: ").strip()
        if user_input.lower() == "quit":
            print("Thanks for visiting. Goodbye!")
            break
        if not user_input:
            continue
        result = app.invoke({
            "messages": [HumanMessage(content=user_input)],
            "intent": "",
            "sentiment": "",
            "resolved": False,
            "transfer_to_human": False
        })
        # Print the result
        print(f"\n[Intent: {result['intent']}, Sentiment: {result['sentiment']}]")
        for msg in result["messages"]:
            if isinstance(msg, AIMessage):
                print(f"Agent: {msg.content}")
if __name__ == "__main__":
    main()
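As written, main() passes a fresh state on every turn, so the bot forgets earlier turns even though messages uses the add reducer. One hedged way to add multi-turn memory, assuming you modify create_customer_service_graph() to compile with a checkpointer: pin each conversation to a thread_id and pass only the fields that should reset per turn.
from langgraph.checkpoint.memory import MemorySaver
# Hypothetical variant: end create_customer_service_graph() with
#     return graph.compile(checkpointer=MemorySaver())
# then drive it like this inside the chat loop:
#     config = {"configurable": {"thread_id": "customer-42"}}
#     result = app.invoke({
#         "messages": [HumanMessage(content=user_input)],  # accumulated across turns by the add reducer
#         "transfer_to_human": False,                      # reset per turn
#         "resolved": False
#     }, config)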
Choosing Between create_agent and LangGraph
| Scenario | Recommendation |
|---|---|
| Quickly building a simple agent | create_agent |
| Single tool call | create_agent |
| Multi-step, complex workflows | LangGraph |
| Conditional branching logic | LangGraph |
| Loops / retry mechanisms | LangGraph |
| Human approval steps | LangGraph |
| Multi-agent collaboration | LangGraph |
| Precise control over latency | LangGraph |
Migrating from create_agent to LangGraph
If your agent needs more sophisticated behavior, the migration looks like this:
# Before: create_agent (simple scenarios)
from langchain.agents import create_agent
agent = create_agent(
    model="gpt-4",
    tools=[search, calculator],
    system_prompt="You are a helpful assistant"
)
# After: LangGraph (complex scenarios)
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
# What you gain:
# - Custom node logic
# - Conditional branching
# - Human review
# - State persistence
# - Error handling
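There is also a middle ground worth knowing: LangGraph ships a prebuilt agent that gives you the tool-calling loop from the "Building an Agent with an LLM" section without writing the graph by hand. A hedged sketch, reusing the search and calculator tools defined earlier:
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import HumanMessage
# A prebuilt agent graph: LLM node + tool node + the conditional loop between them
agent = create_react_agent(ChatOpenAI(model="gpt-3.5-turbo"), tools=[search, calculator])
result = agent.invoke({"messages": [HumanMessage(content="Calculate 2024 - 1991")]})
print(result["messages"][-1].content)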
Summary
This chapter covered:
✅ How LangGraph relates to LangChain agents
✅ LangGraph's core concepts (state, nodes, edges)
✅ Building a basic graph
✅ Conditional branching and loops
✅ Building an agent with an LLM
✅ Human-in-the-loop review
✅ Subgraphs
✅ A complete customer service bot
✅ Choosing between create_agent and LangGraph
Next Steps
Now that you have a handle on LangGraph, the next chapter looks at LangSmith for monitoring and optimizing your AI applications.
Exercises
- Build a multi-step approval workflow
- Implement an agent with retry-on-error
- Build a graph that runs several tasks in parallel
- Add a checkpointer to support conversation recovery
- Migrate a create_agent application to LangGraph