joe = ConversableAgent(
    "joe",
    system_message="Your name is Joe and you are a part of a duo of comedians.",
    llm_config={"config_list": [{"model": "gpt-4", "temperature": 0.7, "api_key": os.environ.get("OPENAI_API_KEY")}]},
    human_input_mode="NEVER",  # Never ask for human input.
    is_termination_msg=lambda msg: "good bye" in msg["content"].lower(),
)

result = joe.initiate_chat(cathy, message="Cathy, tell me a joke and then say the words GOOD BYE.")
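The snippet above assumes a second comedian agent named cathy has already been created. A minimal sketch of that counterpart, following the same pattern as joe (the temperature value here is an assumption, not taken from this section):

```python
import os

from autogen import ConversableAgent

# Hypothetical counterpart agent for the comedian duo; mirrors the "joe" setup above.
cathy = ConversableAgent(
    "cathy",
    system_message="Your name is Cathy and you are a part of a duo of comedians.",
    llm_config={"config_list": [{"model": "gpt-4", "temperature": 0.9, "api_key": os.environ.get("OPENAI_API_KEY")}]},
    human_input_mode="NEVER",  # Never ask for human input.
)
```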
agent_with_number = ConversableAgent(
    "agent_with_number",
    system_message="You are playing a game of guess-my-number. You have the "
    "number 53 in your mind, and I will try to guess it. "
    "If I guess too high, say 'too high', if I guess too low, say 'too low'. ",
    llm_config=llm_config,
    is_termination_msg=lambda msg: "53" in msg["content"],  # terminate if the number is guessed by the other agent
    human_input_mode="NEVER",
)

agent_guess_number = ConversableAgent(
    "agent_guess_number",
    system_message="I have a number in my mind, and you will try to guess it. "
    "If I say 'too high', you should guess a lower number. If I say 'too low', "
    "you should guess a higher number. ",
    llm_config=llm_config,
    human_input_mode="NEVER",
)

result = agent_with_number.initiate_chat(
    agent_guess_number,
    message="I have a number between 1 and 100. Guess it!",
)
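The guessing-game snippets assume an llm_config similar to the one used for joe above. initiate_chat returns a ChatResult object, so you can inspect how the game went once it terminates. A small sketch, assuming the standard ChatResult fields (chat_history, summary, cost):

```python
# Print each turn of the finished guessing game.
for message in result.chat_history:
    print(f"{message['role']}: {message['content']}")

# Summary and accumulated cost are also recorded on the ChatResult.
print(result.summary)
print(result.cost)
```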
agent_with_number = ConversableAgent(
    "agent_with_number",
    system_message="You are playing a game of guess-my-number. "
    "In the first game, you have the "
    "number 53 in your mind, and I will try to guess it. "
    "If I guess too high, say 'too high', if I guess too low, say 'too low'. ",
    llm_config=llm_config,
    max_consecutive_auto_reply=1,  # maximum number of consecutive auto-replies before asking for human input
    is_termination_msg=lambda msg: "53" in msg["content"],  # terminate if the number is guessed by the other agent
    human_input_mode="TERMINATE",  # ask for human input until the game is terminated
)

agent_guess_number = ConversableAgent(
    "agent_guess_number",
    system_message="I have a number in my mind, and you will try to guess it. "
    "If I say 'too high', you should guess a lower number. If I say 'too low', "
    "you should guess a higher number. ",
    llm_config=llm_config,
    human_input_mode="NEVER",
)

result = agent_with_number.initiate_chat(
    agent_guess_number,
    message="I have a number between 1 and 100. Guess it!",
)
human_proxy = ConversableAgent(
    "human_proxy",
    llm_config=False,  # no LLM used for human proxy
    human_input_mode="ALWAYS",  # always ask for human input
)

# Start a chat with the agent with number with an initial guess.
result = human_proxy.initiate_chat(
    agent_with_number,  # this is the same agent with the number as before
    message="10",
)
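Because human_proxy runs with human_input_mode="ALWAYS", every one of its replies is typed at the console. Those inputs are recorded on the returned ChatResult; a sketch of inspecting them, assuming the human_input and chat_history fields of the ChatResult dataclass:

```python
# Show what the human typed during the session and the full exchange.
print("Human inputs:", result.human_input)

for message in result.chat_history:
    print(f"{message['role']}: {message['content']}")
```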
from autogen.coding import CodeBlock
from autogen.coding.jupyter import JupyterCodeExecutor, LocalJupyterServer
from autogen import ConversableAgent

with LocalJupyterServer() as server:
    executor = JupyterCodeExecutor(server)
    code_executor_agent = ConversableAgent(
        "code_executor_agent",
        llm_config=False,  # Turn off LLM for this agent.
        code_execution_config={"executor": executor},  # Use the Jupyter code executor.
        human_input_mode="NEVER",  # Never ask for human input for this agent.
    )

    message_with_code_block = """This is a message with code block.
The code block is below:
```python
print("Jupyter Code Executor test")
```
This is the end of the message.
"""

    # Generate a reply for the given code.
    reply = code_executor_agent.generate_reply(
        messages=[{"role": "user", "content": message_with_code_block}]
    )
    print(reply)

'''
output:
>>>>>>>> EXECUTING CODE BLOCK (inferred language is python)...
exitcode: 0 (execution succeeded)
Code output: Jupyter Code Executor test
'''
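The same pattern also works with a containerized kernel: autogen.coding.jupyter provides a DockerJupyterServer that can be swapped in for LocalJupyterServer when you want the executed code isolated from the host. A rough sketch, assuming Docker is available locally:

```python
from autogen import ConversableAgent
from autogen.coding.jupyter import DockerJupyterServer, JupyterCodeExecutor

# Start a Jupyter kernel inside a Docker container instead of on the host.
with DockerJupyterServer() as server:
    executor = JupyterCodeExecutor(server)
    code_executor_agent = ConversableAgent(
        "code_executor_agent",
        llm_config=False,                              # Turn off LLM for this agent.
        code_execution_config={"executor": executor},  # Execute code in the containerized kernel.
        human_input_mode="NEVER",
    )
```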
Note:
Using the Code Executor in Jupyter requires one extra step compared with executing .py files via the CLI executor: connecting to a Jupyter kernel, i.e. the with LocalJupyterServer() as server step. Once the kernel is connected, that connection is used to create the JupyterCodeExecutor. (A CLI-executor counterpart is sketched after these notes.)
The Azure for Students tier of the Azure OpenAI API cannot run the "Use Code Executor in Conversation" example because it exceeds the token limit (1K tokens per minute). Trimming code_writer_system_message to reduce the token count does let the example code run, but the weaker prompt makes the results less accurate.
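For comparison with the kernel-connection step described in the first note, here is a rough sketch of the CLI-style executor, which needs no server or kernel: each code block is written to a file in a working directory and run as a command-line script (the timeout and work_dir values are arbitrary choices):

```python
import tempfile

from autogen import ConversableAgent
from autogen.coding import LocalCommandLineCodeExecutor

# A temporary directory to hold the generated .py files.
temp_dir = tempfile.TemporaryDirectory()

# No kernel connection needed; code blocks are saved to files and executed via the command line.
executor = LocalCommandLineCodeExecutor(
    timeout=10,              # Time out each execution after 10 seconds.
    work_dir=temp_dir.name,  # Directory where the code files are stored and run.
)

code_executor_agent = ConversableAgent(
    "code_executor_agent",
    llm_config=False,
    code_execution_config={"executor": executor},
    human_input_mode="NEVER",
)
```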
import os

from autogen import ConversableAgent

# Let's first define the assistant agent that suggests tool calls.
assistant = ConversableAgent(
    name="Assistant",
    system_message="You are a helpful AI assistant. "
    "You can help with simple calculations. "
    "Return 'TERMINATE' when the task is done.",
    llm_config={"config_list": [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}]},
)

# The user proxy agent is used for interacting with the assistant agent
# and executes tool calls.
user_proxy = ConversableAgent(
    name="User",
    llm_config=False,
    is_termination_msg=lambda msg: msg.get("content") is not None and "TERMINATE" in msg["content"],
    human_input_mode="NEVER",
)

# Register the tool signature with the assistant agent.
# register_for_llm(...) returns a decorator, so the trailing (calculator) applies it to the function.
assistant.register_for_llm(name="calculator", description="A simple calculator")(calculator)

# Register the tool function with the user proxy agent.
user_proxy.register_for_execution(name="calculator")(calculator)
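Both registration calls refer to a calculator function that is not defined in this snippet (in a runnable script it would have to be defined before register_for_llm is called). A sketch of what that tool might look like, roughly following the AutoGen tool-use tutorial; the operator handling and the final prompt below are illustrative, not taken from this section:

```python
from typing import Annotated, Literal

Operator = Literal["+", "-", "*", "/"]

# The tool the assistant can call; the type annotations are used to build the tool schema.
def calculator(a: int, b: int, operator: Annotated[Operator, "operator"]) -> int:
    if operator == "+":
        return a + b
    elif operator == "-":
        return a - b
    elif operator == "*":
        return a * b
    elif operator == "/":
        return int(a / b)
    else:
        raise ValueError("Invalid operator")

# After registering the tool on both agents, the user proxy kicks off the chat.
chat_result = user_proxy.initiate_chat(
    assistant,
    message="What is (44232 + 13312 / (232 - 32)) * 5?",
)
```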