from marinabox import mb_start_computer, mb_stop_computer, mb_use_computer_tool
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import ToolNode
from langgraph.graph import StateGraph, START, END
from langgraph.types import Command
from langchain_anthropic import ChatAnthropic
from typing import Literal
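# Minimal LangGraph workflow: start a MarinaBox computer session, let Claude drive it
# through tool calls, and stop the session when the user types "stop_computer".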
# Set up tools and model
tools = [mb_use_computer_tool]
tool_node = ToolNode(tools=tools)
model_with_tools = ChatAnthropic(
    model="claude-3-5-sonnet-20241022",
    temperature=0,
).bind_tools(tools)
# Define workflow logic
def should_continue(state: dict) -> Command[Literal["tool_node", "stop_computer"]]:
    messages = state["messages"]
    if messages:
        last_message = messages[-1]
        # The model requested a tool call, so hand off to the tool node
        if getattr(last_message, "tool_calls", None):
            return Command(goto="tool_node")
    # No pending tool calls (or no messages at all): shut the session down
    return Command(goto="stop_computer")
def call_model(state: dict):
    # Ask the user for the next instruction; typing "stop_computer" ends the run
    input_message = input("Enter your message: ")
    if input_message != "stop_computer":
        messages = [HumanMessage(content=input_message)]
        response = model_with_tools.invoke(messages)
        return {
            "messages": [response],
            "session_id": state.get("session_id"),
        }
    # An empty message list tells should_continue to route to stop_computer
    return {
        "messages": [],
        "session_id": state.get("session_id"),
    }
# Create workflow
workflow = StateGraph(dict)
# Add nodes
workflow.add_node("start_computer", mb_start_computer)
workflow.add_node("agent", call_model)
workflow.add_node("tool_node", tool_node)
workflow.add_node("stop_computer", mb_stop_computer)
workflow.add_node("should_continue", should_continue)
# Add edges
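# Flow: START -> start_computer -> agent -> should_continue, which routes either to
# tool_node (looping back to agent) or to stop_computer -> END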
workflow.add_edge(START, "start_computer")
workflow.add_edge("start_computer", "agent")
workflow.add_edge("tool_node", "agent")
workflow.add_edge("agent", "should_continue")
workflow.add_edge("stop_computer", END)
# Compile and run
app = workflow.compile()
app.invoke({"messages": []})
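# A minimal alternative sketch (assumes LangGraph's standard streaming API): stream
# each state snapshot as the graph runs, instead of the blocking invoke above.
# for snapshot in app.stream({"messages": []}, stream_mode="values"):
#     print(snapshot.get("messages", []))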