import asyncio
import json
from langchain.agents import AgentExecutor, create_structured_chat_agent, Tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, ToolCall
from langchain_community.chat_models.fake import FakeMessagesListChatModel
# 1. Define a simple, predictable tool
def simple_tool_function(input: str) -> str:
    """A predictable stub tool: log the input it received, return a fixed reply."""
    call_trace = f"Tool called with input: '{input}'"
    print(call_trace)
    return "The tool says hello back!"
# Register the stub as the agent's single available tool. `Tool` wraps a plain
# callable with the name/description the model sees in the prompt's {tools} slot.
tools = [
    Tool(
        name="simple_tool",
        func=simple_tool_function,
        description="A simple test tool.",
    )
]
# 2. Scripted model outputs in the structured-chat JSON blob format.
#    The fake LLM replays them in order, so the whole run is deterministic.
_tool_call_blob = json.dumps(
    {"action": "simple_tool", "action_input": {"input": "hello"}}
)
_final_answer_blob = json.dumps(
    {
        "action": "Final Answer",
        "action_input": "The tool call was successful. The tool said: 'The tool says hello back!'",
    }
)
responses = [
    AIMessage(content=_tool_call_blob),     # turn 1: agent chooses the tool
    AIMessage(content=_final_answer_blob),  # turn 2: agent answers after the observation
]
# The fake chat model pops one canned response per model call.
llm = FakeMessagesListChatModel(responses=responses)
# 3. Structured-chat prompt.
#
# BUG FIX: `create_structured_chat_agent` renders the intermediate steps with
# `format_log_to_str`, i.e. it feeds "agent_scratchpad" into the prompt as a
# plain STRING, not a list of messages. With
# `MessagesPlaceholder(variable_name="agent_scratchpad")` the template raises
# "variable agent_scratchpad should be a list of base messages, got ... of type
# <class 'str'>" as soon as the first tool observation is fed back (the second
# model turn). The scratchpad must therefore be interpolated as text inside the
# human message — this mirrors the canonical hwchase17/structured-chat-agent
# prompt that this agent constructor is designed for.
prompt = ChatPromptTemplate.from_messages([
    ("system", """Respond to the human as helpfully and accurately as possible. You have access to the following tools:
{tools}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or {tool_names}
Provide only ONE action per $JSON_BLOB, as shown:
```
{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation as needed)
Thought: I know what to respond
Action:
```
{{
"action": "Final Answer",
"action_input": "Final response to human"
}}
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation"""),
    # The scratchpad (formatted Thought/Action/Observation log) is plain text,
    # so it lives inside the human message rather than a MessagesPlaceholder.
    ("human", "{input}\n\n{agent_scratchpad}\n (reminder to respond in a JSON blob no matter what)"),
])
# 4. Wire the scripted model, tools, and prompt into a structured-chat agent.
agent = create_structured_chat_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    max_iterations=3,            # hard stop so a mis-parsing loop cannot spin forever
    handle_parsing_errors=True,  # feed parse failures back to the model as observations
    verbose=True,
)
# 5. Drive one exchange through the executor's async entry point.
result = asyncio.run(agent_executor.ainvoke({"input": "call the tool"}))
Требования к Python, которые я использую:
langchain==0.3.27
langchain-community==0.3.27
langchain-core==0.3.74
langchain-aws==0.2.30
langchain-openai==0.3.29
Версия Python: 3.9.
Как избавиться от этой ошибки?
Это очень простой пример, как я пытаюсь использовать Langchain, чтобы вызвать LLM и найти инструмент для использования: < /p> [code]import asyncio import json from langchain.agents import AgentExecutor, create_structured_chat_agent, Tool from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.messages import AIMessage, ToolCall from langchain_community.chat_models.fake import FakeMessagesListChatModel
# 1. Define a simple, predictable tool def simple_tool_function(input: str) -> str: """A simple tool that returns a fixed string.""" print(f"Tool called with input: '{input}'") return "The tool says hello back!"
# 2. Create responses that follow the structured chat format responses = [ # First response: Agent decides to use a tool AIMessage( content=json.dumps({ "action": "simple_tool", "action_input": {"input": "hello"} }) ), # Second response: Agent provides final answer after tool execution AIMessage( content=json.dumps({ "action": "Final Answer", "action_input": "The tool call was successful. The tool said: 'The tool says hello back!'" }) ), ]
# Use the modern FakeMessagesListChatModel llm = FakeMessagesListChatModel(responses=responses)
# 3. Create the prompt using the standard structured chat prompt format prompt = ChatPromptTemplate.from_messages([ ("system", """Respond to the human as helpfully and accurately as possible. You have access to the following tools:
{tools}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or {tool_names}
Question: input question to answer Thought: consider previous and subsequent steps Action: ``` $JSON_BLOB ``` Observation: action result ... (repeat Thought/Action/Observation as needed) Thought: I know what to respond Action: ``` {{ "action": "Final Answer", "action_input": "Final response to human" }} ```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation"""), ("human", "{input}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ])
# 5. Run the agent result = asyncio.run(agent_executor.ainvoke({"input": "call the tool"})) < /code> Требования к Python Я использую: < /p> langchain==0.3.27 langchain-community==0.3.27 langchain-core==0.3.74 langchain-aws==0.2.30 langchain-openai==0.3.29 [/code] версия Python: 3.9 Как избавиться от значения ошибки.>
Это очень простой пример, как я пытаюсь использовать Langchain, чтобы вызвать LLM и найти инструмент для использования
import asyncio
import json
from langchain.agents import AgentExecutor, create_structured_chat_agent, Tool
from...
Я хочу знать, есть ли способ преобразовать мои запросы Amazon MWS Scratchpad в API -вызов
Например, при использовании MWS Scratchpad мне дают строку для подписи:
mws.amazonservices.co.uk
. /Products/2011-10-01
....