Query OpenAI
from openai import OpenAI

client = OpenAI(api_key="API_KEY_HERE")


def query_gpt(prompt):
    """
    Query OpenAI's chat completions API for a response to a given prompt.

    Parameters:
        prompt (str): The prompt to send to the model.

    Returns:
        str: The response from the model, or an error message.
    """
    try:
        response = client.chat.completions.create(
            model="gpt-4",  # Replace with the desired model name
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=150,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"An error occurred: {e}"


# Example usage
prompt = (
    "Please tell me all the things that I can make with a banana and some soy sauce"
)
response = query_gpt(prompt)
print(response)
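Both snippets hardcode the API key. As a minimal sketch, assuming the key is exported in an OPENAI_API_KEY environment variable (the variable name the official openai-python SDK reads by default), the client can pick it up from the environment instead:

import os

from openai import OpenAI

# Read the key from the environment instead of hardcoding it; calling
# OpenAI() with no arguments also falls back to OPENAI_API_KEY automatically.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])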
from openai import OpenAI

client = OpenAI(api_key="API_KEY_HERE")


def stream_gpt_response(
    model: str,
    prompt: str,
    temperature: float = 1,
    frequency_penalty: float = 0,
    presence_penalty: float = 0,
) -> None:
    """
    Stream a response from OpenAI's chat completions API for a given prompt.

    Parameters:
        model (str): The name of the model to use.
        prompt (str): The prompt to send to the model.
        temperature (float):
            Sampling temperature, between 0 and 2. Higher values make the
            output more random; lower values make it more deterministic.
            Defaults to 1.
        frequency_penalty (float):
            Penalty between -2.0 and 2.0. Positive values penalize tokens in
            proportion to how often they have already appeared, so the model
            repeats itself less. Defaults to 0.
        presence_penalty (float):
            Penalty between -2.0 and 2.0. Positive values penalize tokens that
            have appeared at all, nudging the model toward new topics.
            Defaults to 0.

    Returns:
        None: Prints the response chunks as they are received.
    """
    try:
        stream = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            stream=True,
        )
        for part in stream:
            # Each chunk carries an incremental delta; content may be None.
            print(part.choices[0].delta.content or "", end="", flush=True)
        print()  # Finish with a newline once the stream ends
    except Exception as e:
        print(f"An error occurred: {e}")


# Example usage
prompt = (
    "Please tell me all the things that I can make with a banana and some soy sauce"
)
stream_gpt_response(prompt=prompt, model="gpt-4")
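As a usage sketch, the temperature and penalties can be tuned together to shape the stream; the values below are illustrative, not recommendations:

# A more deterministic, repetition-averse stream (illustrative values)
stream_gpt_response(
    model="gpt-4",
    prompt="Please tell me all the things that I can make with a banana and some soy sauce",
    temperature=0.2,
    frequency_penalty=0.5,
    presence_penalty=0.3,
)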