import json
import operator
from typing import List, Literal, Optional

from typing_extensions import Annotated, TypedDict

from langchain.chat_models import init_chat_model
from langchain.tools import tool
from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage, ToolMessage
from langgraph.graph import END, START, StateGraph
from langgraph.types import interrupt
# Step 1: Initialize the chat model the agent will use (DeepSeek, streaming on).
_MODEL_CONFIG = {
    "model_provider": "deepseek",
    "streaming": True,
    "temperature": 0.7,
}
model = init_chat_model("deepseek-chat", **_MODEL_CONFIG)
# Define tools
@tool
def multiply(a: int, b: int) -> int:
    """Multiply `a` and `b`.

    Args:
        a: First int
        b: Second int
    """
    product = a * b
    return product
@tool
def add(a: int, b: int) -> int:
    """Adds `a` and `b`.

    Args:
        a: First int
        b: Second int
    """
    total = a + b
    return total
@tool
def divide(a: int, b: int) -> float:
    """Divide `a` and `b`.

    Args:
        a: First int
        b: Second int
    """
    quotient = a / b
    return quotient
# Augment the LLM with tools
tools = [add, multiply, divide]
# Dispatch table: tool name -> tool object. Use `t` as the loop variable —
# the original used `tool`, which shadowed the imported @tool decorator
# at module level.
tools_by_name = {t.name: t for t in tools}
model_with_tools = model.bind_tools(tools)
# Step 2: Define state
class MessagesState(TypedDict):
    """Graph state: the running conversation plus an LLM-call counter."""

    # operator.add is the reducer: each node's returned messages are
    # appended to (not replacing) the accumulated list.
    messages: Annotated[list[AnyMessage], operator.add]
    # Count of how many times the LLM node has run.
    llm_calls: int
# Step 3: Define model node
def llm_call(state: dict):
    """LLM decides whether to call a tool or not."""
    system = SystemMessage(
        content="You are a helpful assistant tasked with performing arithmetic on a set of inputs."
    )
    response = model_with_tools.invoke([system] + state["messages"])
    return {
        "messages": [response],
        "llm_calls": state.get("llm_calls", 0) + 1,
    }
# Step 4: Define tool node
def tool_node(state: dict):
    """Performs the tool call.

    Executes every tool call requested by the last (AI) message, looking
    each one up in `tools_by_name`, and returns the observations as
    ToolMessages so the state reducer appends them to the conversation.
    """
    results = []
    # The last message is the AIMessage carrying the tool_calls list.
    for call in state["messages"][-1].tool_calls:
        # Named `requested` (not `tool`) so the imported @tool decorator
        # is not shadowed.
        requested = tools_by_name[call["name"]]
        observation = requested.invoke(call["args"])
        # The arithmetic tools return int/float, but ToolMessage content
        # is expected to be a string (or list of content blocks) —
        # stringify explicitly instead of relying on coercion.
        results.append(
            ToolMessage(content=str(observation), tool_call_id=call["id"])
        )
    return {"messages": results}
# Step 5: Define logic to determine whether to end


# Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call
def should_continue(state: MessagesState) -> Literal["tool_node", END]:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call."""
    last_message = state["messages"][-1]
    # If the LLM requested tools, run them; otherwise the answer is final.
    if last_message.tool_calls:
        return "tool_node"
    return END


# Build the workflow.
# NOTE(review): the StateGraph wiring below was missing from the pasted
# source (`agent_builder` was used without ever being defined) — it has been
# reconstructed from the standard LangGraph tool-calling agent loop; confirm
# against the original notebook.
agent_builder = StateGraph(MessagesState)
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges(
    "llm_call",
    should_continue,
    {"tool_node": "tool_node", END: END},
)
# After tools execute, loop back to the LLM so it can use the observations.
agent_builder.add_edge("tool_node", "llm_call")

# Compile the agent
agent = agent_builder.compile()
# Render the compiled graph as a Mermaid PNG (requires an IPython session).
from IPython.display import Image, display

# Show the agent
graph_png = agent.get_graph(xray=True).draw_mermaid_png()
display(Image(graph_png))
# Invoke the agent on a simple arithmetic request and print the full trace.
initial_state = {"messages": [HumanMessage(content="Add 3 and 4.")]}
result = agent.invoke(initial_state)
for message in result["messages"]:
    message.pretty_print()