app.py
import autogen

# OpenAI model configuration; replace 'API_KEY' with your own key.
config_list = [
    {
        'model': 'gpt-4',
        'api_key': 'API_KEY'
    }
]

llm_config = {
    "request_timeout": 600,
    "seed": 42,  # cache seed for reproducible runs
    "config_list": config_list,
    "temperature": 0
}

# LLM-backed agent that plays the CTO role and writes the code.
assistant = autogen.AssistantAgent(
    name="CTO",
    llm_config=llm_config,
    system_message="Chief technical officer of a tech company"
)

# Proxy agent that executes generated code in ./web and auto-replies up to 10 times,
# stopping when a message ends with TERMINATE.
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web"},
    llm_config=llm_config,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
)

task = """
Write python code to output numbers 1 to 100, and then store the code in a file
"""

user_proxy.initiate_chat(
    assistant,
    message=task
)

task2 = """
Change the code in the file you just created to instead output numbers 1 to 200
"""

user_proxy.initiate_chat(
    assistant,
    message=task2
)
Similar error, even after renaming the folder from 'autogen' to 'agen':
Traceback (most recent call last):
  File "/Users/GuestUser/GitHub/agen/app.py", line 1, in <module>
    import autogen
ModuleNotFoundError: No module named 'autogen'
EDIT: Even though I had Python 3.11.4 installed, VS Code was still using 3.9; I had to switch the interpreter manually by clicking the version number at the bottom of the VS Code window.
try this:
which python
copy the path it prints, then use that path to install the package:
<link_to_python> -m pip install pyautogen
this worked for me ^
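As a quick sanity check (my addition, not part of the original suggestion), you can confirm which interpreter VS Code is actually running and whether pyautogen is importable from it:

# Run this with the interpreter selected in VS Code; it may differ from what `which python` prints.
import sys
print(sys.executable)  # full path of the interpreter executing this script

import autogen         # raises ModuleNotFoundError if pyautogen isn't installed for this interpreter
print(getattr(autogen, "__version__", "unknown"))  # version attribute, if the installed release exposes it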
I'm getting stuck on the following error:
raise self._make_status_error_from_response(err.response) from None
openai.RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for gpt-4 in organization org-ehVxQx3I1TFqEcMydXbUkFUz on tokens per min (TPM): Limit 10000, Used 9343, Requested 1103. Please try again in 2.676s. Visit https://platform.openai.com/account/rate-limits to learn more.', 'type': 'tokens', 'param': None, 'code': 'rate_limit_exceeded'}}
What can I do about the rate-limiting error?
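The limit in that message is tokens per minute, so waiting a few seconds or shrinking the prompt usually helps. One generic workaround (a sketch of my own, not from this thread; the chat_with_retry helper is hypothetical and not part of AutoGen) is to catch openai.RateLimitError around initiate_chat and retry with exponential backoff, dropped into the app.py above:

import time
import openai

# Hypothetical retry wrapper: back off and retry when the OpenAI API returns a 429.
def chat_with_retry(proxy, agent, message, max_attempts=5):
    for attempt in range(max_attempts):
        try:
            return proxy.initiate_chat(agent, message=message)
        except openai.RateLimitError:
            wait = 2 ** attempt  # exponential backoff: 1s, 2s, 4s, ...
            print(f"Rate limited; retrying in {wait}s")
            time.sleep(wait)
    raise RuntimeError("Still rate limited after retries")

chat_with_retry(user_proxy, assistant, task)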