# Typing helpers for the graph-state annotation.
from typing import Annotated

from typing_extensions import TypedDict

# LangGraph graph construction + the reducer that appends messages.
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_openai import ChatOpenAI
# Initialize the chat model used by the graph's chatbot node.
# (In the original mangled text the assignment had been swallowed
# into the comment, so `llm` was never defined.)
llm = ChatOpenAI()
# Graph state definition.
class State(TypedDict):
    # Messages have the type "list". The `add_messages` function
    # in the annotation defines how this state key should be updated
    # (in this case, it appends messages to the list, rather than
    # overwriting them).
    messages: Annotated[list, add_messages]


# Graph node definition.
def chatbot(state: State):
    """Invoke the LLM on the accumulated conversation.

    Returns a partial state update; the `add_messages` reducer
    appends the new AI message to ``state["messages"]``.
    """
    return {"messages": [llm.invoke(state["messages"])]}
# Execute the graph: stream events for one user turn and print each
# assistant reply as it is produced.
user_input = '介绍你自己'
for event in graph.stream({"messages": [("user", user_input)]}):
    for value in event.values():
        print("Assistant:", value["messages"][-1].content)
def route_tools(state):
    """Conditional-edge router run after the chatbot node.

    Returns ``"tools"`` when the last AI message carries tool calls,
    otherwise ``END`` so the graph finishes the turn.
    (The ``def`` header was lost in extraction — reconstructed from the
    registration call that passes ``route_tools`` as a router.)

    Raises:
        ValueError: if no messages can be found in ``state``.
    """
    # The state may be a bare message list or a dict with a
    # "messages" key; pick the most recent message either way.
    if isinstance(state, list):
        ai_message = state[-1]
    elif messages := state.get("messages", []):
        ai_message = messages[-1]
    else:
        raise ValueError(f"No messages found in input state to tool_edge: {state}")
    # Route to the tools node only when the model requested a tool.
    if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
        return "tools"
    return END
# The router returns "tools" if the chatbot asks to use a tool, and
# END if it is fine directly responding. This conditional routing
# defines the main agent loop.
graph_builder.add_conditional_edges(
    "chatbot",
    route_tools,
    {"tools": "tools", END: END},
)
# Stream full state snapshots and pretty-print the newest message.
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================ Human Message ============= 你好,我是小王。你能介绍你自己给我认识吗? ================= Ai Message ================ 你好,小王!我是一个人工智能助手,名叫 ChatGLM。我基于清华大学 KEG 实验室和智谱 AI 公司于 2024 年共同训练的语言模型 GLM开发而成。我的任务是针对用户的问题和要求提供适当的答复和支持。有什么可以帮助您的吗?
1 2 3 4 5 6 7 8 9
user_input = "还记得我的名字吗?"
# The config is the **second positional argument** to stream() or invoke()!
config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {"messages": [("user", user_input)]}, config, stream_mode="values"
)
for event in events:
    if "messages" in event:
        event["messages"][-1].pretty_print()
================ Human Message ============ 还记得我的名字吗? ================ Ai Message ================ 很抱歉,作为一个AI,我没有能力记住个人用户的信息,包括他们的名字。每次与我互动时,我都将作为一个全新的对话开始。如果您愿意,可以再次告诉我您的名字。
user_input = "指环王的豆瓣评分有多少"
# The config is the **second positional argument** to stream() or invoke()!
config = {"configurable": {"thread_id": "1"}}
events = graph.stream(
    {"query": user_input}, config, stream_mode="values"
)
for event in events:
    print(event)
该问题会被认为需要工具调用才能回答
1 2
# Inspect the checkpoint; `next` names the node(s) that will run
# when the interrupted graph is resumed.
snapshot = graph.get_state(config)
snapshot.next
('tools',)
继续图的执行,可以看到调用了模拟的函数
1 2 3 4 5
# Resume the interrupted run: passing None as the input continues
# execution from the last checkpoint instead of starting a new turn.
events = graph.stream(
    None, config, stream_mode="values"
)
for event in events:
    print(event)
使用工具
● 2. 人为介入
1 2 3 4 5 6 7 8
user_input = "指环王的豆瓣评分有多少"
# The config is the **second positional argument** to stream() or invoke()!
config = {"configurable": {"thread_id": "2"}}
events = graph.stream(
    {"query": user_input}, config, stream_mode="values"
)
for event in events:
    print(event)
1 2
# Inspect the checkpoint; `next` names the node(s) that will run
# when the interrupted graph is resumed.
snapshot = graph.get_state(config)
snapshot.next
('tools',)
从这里开始,修改图的状态,强制改为「闲聊」
1 2 3 4
# Manually overwrite part of the checkpointed graph state
# (human-in-the-loop intervention).
graph.update_state(
    config,
    {"category": "闲聊"},
)
可以看到下一步为「闲聊」,说明人为介入成功了
1 2
# Re-inspect the checkpoint; after the manual state update, `next`
# should now point at the chatbot node.
snapshot = graph.get_state(config)
snapshot.next
('chatbot',)
「闲聊」模式是调用模型自身能力回答的结果,因此可以看到有以下输出
1 2 3 4 5
# Resume the interrupted run: passing None as the input continues
# execution from the last checkpoint instead of starting a new turn.
events = graph.stream(
    None, config, stream_mode="values"
)
for event in events:
    print(event)
《指环王》系列电影在豆瓣上的评分如下:
《指环王:护戒使者》(The Lord of the Rings: The Fellowship of the Ring)的豆瓣评分为9.1分。
《指环王:双塔奇兵》(The Lord of the Rings: The Two Towers)的豆瓣评分为9.2分。
《指环王:王者归来》(The Lord of the Rings: The Return of the King)的豆瓣评分为9.3分。