Skip to content

Commit ee43d47

Browse files
committed
chore: bump deps for nextjs, react and copilotkit
Signed-off-by: Tyler Slaton <tyler@copilotkit.ai>
1 parent 2170f41 commit ee43d47

File tree

16 files changed

+1304
-2149
lines changed

16 files changed

+1304
-2149
lines changed

agent/.python-version

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
3.12

agent/langgraph.json

Lines changed: 0 additions & 9 deletions
This file was deleted.

agent/main.py (filename header missing in this rendering; presumably the new server entry point, since it runs "main:app" and replaces sample_agent/demo.py)

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,36 +1,37 @@
1-
"""
2-
This serves the "sample_agent" agent. This is an example of self-hosting an agent
3-
through our FastAPI integration. However, you can also host in LangGraph platform.
4-
"""
5-
61
"""
Serves the "sample_agent" agent over HTTP.

This is an example of self-hosting an agent through the FastAPI
integration; the same graph can also be hosted on LangGraph Platform.
"""

import os
import warnings

from dotenv import load_dotenv

# Load environment variables BEFORE importing the agent module, so anything
# it reads at import time (e.g. OPENAI_API_KEY) is already available.
_ = load_dotenv()  # pylint: disable=wrong-import-position

from fastapi import FastAPI
import uvicorn

from copilotkit import LangGraphAGUIAgent
from ag_ui_langgraph import add_langgraph_fastapi_endpoint

from src.agent import graph

app = FastAPI()

# Expose the LangGraph agent at the root path via the AG-UI protocol.
add_langgraph_fastapi_endpoint(
    app=app,
    agent=LangGraphAGUIAgent(
        name="sample_agent",
        description="An example agent to use as a starting point for your own agent.",
        graph=graph,
    ),
    path="/",
)
2722

23+
2824
def main():
    """Start the uvicorn development server for this application."""
    # The PORT environment variable overrides the default port 8123.
    port = int(os.getenv("PORT", "8123"))
    uvicorn.run("main:app", host="0.0.0.0", port=port, reload=True)


# Suppress pydantic's UserWarnings (applied at import time of this module).
warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")

if __name__ == "__main__":
    main()

agent/poetry.lock

Lines changed: 0 additions & 1943 deletions
This file was deleted.

agent/pyproject.toml

Lines changed: 14 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,27 +1,18 @@
1-
[tool.poetry]
1+
[project]
22
name = "agent"
33
version = "0.1.0"
44
description = ""
5-
authors = ["CopilotKit"]
65
readme = "README.md"
7-
packages = [{include = "sample_agent"}]
8-
package-mode = false
9-
10-
[tool.poetry.dependencies]
11-
python = ">=3.10,<3.13"
12-
copilotkit = "0.1.55"
13-
langchain = "0.3.26"
14-
langchain-openai = "0.3.28"
15-
langgraph = "0.4.10"
16-
langsmith = "0.4.4"
17-
openai = "^1.68.2"
18-
fastapi = "^0.115.5"
19-
uvicorn = "^0.29.0"
20-
python-dotenv = "^1.0.0"
21-
22-
[tool.poetry.scripts]
23-
demo = "sample_agent.demo:main"
24-
25-
[build-system]
26-
requires = ["poetry-core"]
27-
build-backend = "poetry.core.masonry.api"
6+
requires-python = ">=3.10,<3.13"
7+
dependencies = [
8+
"copilotkit==0.1.70a0",
9+
"langchain==1.0.1",
10+
"langchain-openai==1.0.1",
11+
"langgraph==1.0.1",
12+
"openai==1.109.1",
13+
"fastapi==0.115.12",
14+
"uvicorn>=0.38.0",
15+
"python-dotenv>=1.0.0",
16+
"ag-ui-langgraph==0.0.18",
17+
"pydantic>=2.0.0,<3.0.0",
18+
]

agent/requirements.txt

Lines changed: 0 additions & 9 deletions
This file was deleted.

agent/src/agent.py (filename header missing in this rendering; presumably so, since this diff imports from src.util and defines the workflow graph)

Lines changed: 39 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -3,75 +3,65 @@
33
It defines the workflow graph, state, tools, nodes and edges.
44
"""
55

6+
from importlib.resources import Resource
67
from typing import Any, List
7-
from typing_extensions import Literal
8-
from langchain_openai import ChatOpenAI
8+
9+
from copilotkit import CopilotKitState
10+
from langchain.tools import tool
911
from langchain_core.messages import SystemMessage
1012
from langchain_core.runnables import RunnableConfig
11-
from langchain.tools import tool
12-
from langgraph.graph import StateGraph, END
13-
from langgraph.types import Command
14-
from langgraph.graph import MessagesState
15-
from langgraph.prebuilt import ToolNode
13+
from langchain_openai import ChatOpenAI
1614
from langgraph.checkpoint.memory import MemorySaver
15+
from langgraph.graph import StateGraph
16+
from langgraph.prebuilt import ToolNode
17+
from langgraph.types import Command
18+
from typing_extensions import Literal
19+
20+
from src.util import should_route_to_tool_node
1721

18-
class AgentState(CopilotKitState):
    """
    State carried through the agent's graph.

    Inherits all CopilotKitState fields and adds two custom ones:

    - proverbs: list of proverb strings (presumably accumulated during the
      conversation — confirm against the frontend usage).
    - tools: tool definitions supplied by the frontend (AG-UI), which the
      chat node binds to the model alongside the backend tools.
    """

    # NOTE(review): the previous docstring described a `language` field that
    # does not exist on this state; updated to match the real fields.
    proverbs: List[str]
    tools: List[Any]
    # your_custom_agent_state: str = ""
2935

36+
3037
@tool
def get_weather(location: str):
    """
    Get the weather for a given location.
    """
    # Stub implementation: reports the same fixed temperature for any input.
    report = f"The weather for {location} is 70 degrees."
    return report


# Backend tools available to the agent (frontend tools arrive via state).
tools = [get_weather]
4746

48-
async def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal["tool_node", "__end__"]]:
47+
48+
async def chat_node(
49+
state: AgentState, config: RunnableConfig
50+
) -> Command[Literal["tool_node", "__end__"]]:
4951
"""
50-
Standard chat node based on the ReAct design pattern. It handles:
51-
- The model to use (and binds in CopilotKit actions and the tools defined above)
52-
- The system prompt
53-
- Getting a response from the model
54-
- Handling tool calls
55-
56-
For more about the ReAct design pattern, see:
57-
https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg
52+
Standard chat node based on the ReAct design pattern.
5853
"""
5954

6055
# 1. Define the model
6156
model = ChatOpenAI(model="gpt-4o")
6257

6358
# 2. Bind the tools to the model
59+
fe_tools = state.get("tools", [])
6460
model_with_tools = model.bind_tools(
6561
[
66-
*state.get("tools", []), # bind tools defined by ag-ui
67-
get_weather,
68-
# your_tool_here
69-
],
70-
71-
# 2.1 Disable parallel tool calls to avoid race conditions,
72-
# enable this for faster performance if you want to manage
73-
# the complexity of running tool calls in parallel.
74-
parallel_tool_calls=False,
62+
*fe_tools,
63+
*tools,
64+
]
7565
)
7666

7767
# 3. Define the system message by which the chat model will be run
@@ -80,18 +70,21 @@ async def chat_node(state: AgentState, config: RunnableConfig) -> Command[Litera
8070
)
8171

8272
# 4. Run the model to generate a response
83-
response = await model_with_tools.ainvoke([
84-
system_message,
85-
*state["messages"],
86-
], config)
73+
response = await model_with_tools.ainvoke(
74+
[
75+
system_message,
76+
*state["messages"],
77+
],
78+
config,
79+
)
80+
81+
tool_calls = response.tool_calls
82+
if tool_calls and should_route_to_tool_node(tool_calls, fe_tools):
83+
return Command(goto="tool_node", update={"messages": response})
8784

8885
# 5. We've handled all tool calls, so we can end the graph.
89-
return Command(
90-
goto=END,
91-
update={
92-
"messages": response
93-
}
94-
)
86+
return Command(goto="__end__", update={"messages": response})
87+
9588

9689
# Define the workflow graph
9790
workflow = StateGraph(AgentState)

agent/src/util.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
from langchain_core.messages import ToolCall
2+
3+
4+
def should_route_to_tool_node(
    tool_calls: "list[ToolCall]",
    fe_tools: "list[ToolCall]",
) -> bool:
    """
    Decide whether the graph should route to the backend tool node.

    Returns True only when every tool call targets a backend tool — i.e.
    none of them match a tool supplied by the frontend (frontend tools are
    executed client-side, so the graph must end instead of routing to
    tool_node).

    Args:
        tool_calls: Tool calls from the model response. Entries may be
            dict-style ToolCalls or objects with a ``name`` attribute.
        fe_tools: Frontend tool definitions. Entries may likewise be dicts
            or objects with a ``name`` attribute (the previous docstring
            incorrectly described these as bare name strings).

    Returns:
        bool: True if all tool calls are backend tools; False if any tool
        call is a frontend tool, or if there are no tool calls at all.
    """
    if not tool_calls:
        # Nothing to execute, so there is no reason to visit the tool node.
        return False

    def _tool_name(tool):
        # Tools arrive either as dict-style ToolCalls (TypedDict) or as
        # attribute-style objects; support both uniformly.
        if isinstance(tool, dict):
            return tool.get("name")
        return getattr(tool, "name", None)

    # Set of frontend tool names for O(1) membership tests.
    fe_tool_names = {_tool_name(tool) for tool in fe_tools}

    # Route to the tool node only if no call targets a frontend tool.
    return all(_tool_name(call) not in fe_tool_names for call in tool_calls)

0 commit comments

Comments
 (0)