@AsherVo
Created July 5, 2023 21:33
Python script for chatting with ChatGPT. Modify the arguments for it to actually be useful.
import os
import openai
import asyncio
import time
import sys
import argparse
import tiktoken
import datetime
import re
import json
from aiohttp import ClientSession
## PROCESS ARGUMENTS
parser = argparse.ArgumentParser()
parser.add_argument( "-m", "--model", type=str, help="Model")
parser.add_argument( "-t", "--temperature", type=str, help="Temperature")
parser.add_argument( "-c", "--chatfile", type=str, help="Chat File")
parser.add_argument( "-s", "--systemfile", type=str, help="System File")
parser.add_argument( "-l", "--loadfile", type=str, help="Load File")
parser.add_argument( "-d", "--document", type=str, help="Source Document")
args = vars( parser.parse_args() )
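
# Example invocations (hypothetical file names, not part of the original gist; assumes the
# script is saved as chat.py, the openai and tiktoken packages are installed, and a Chats/
# directory exists next to it for transcripts):
#   python chat.py                                  -> gpt-3.5-turbo at temperature 0.2
#   python chat.py -m 4 -t 0.7                      -> gpt-4 at temperature 0.7
#   python chat.py -s system.txt -c chat.txt -d source.txt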
if not args[ 'model' ]:
    args[ 'model' ] = 'gpt-3.5-turbo'
elif args[ 'model' ] == '4':
    args[ 'model' ] = 'gpt-4'

if not args[ 'temperature' ]:
    args[ 'temperature' ] = 0.2
else:
    args[ 'temperature' ] = float( args[ 'temperature' ] )

loadedFile = None
if ( args[ 'loadfile' ] ):
    with open( args[ 'loadfile' ] ) as f:
        loadedFile = json.load( f )

sourceText = None
if ( args[ 'document' ] ):
    with open( args[ 'document' ] ) as f:
        sourceText = f.read()

## HELPER FUNCS
def num_tokens_from_messages(messages, model):
    """Returns the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    num_tokens = 0
    for message in messages:
        num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":  # if there's a name, the role is omitted
                num_tokens += -1  # role is always required and always 1 token
    num_tokens += 2  # every reply is primed with <im_start>assistant
    return num_tokens

def input_message():
    if args[ 'chatfile' ]:
        # Wait for Enter, then read the prompt from the chat file
        input( f"[{ args[ 'chatfile' ] }]: " )
        result = "_"
        # Read chat file
        with open(args[ 'chatfile' ], 'r') as file:
            result = file.read()
        sys.stdout.write( f"{ result }\n" )
        # Erase chat file after submitting
        with open(args[ 'chatfile' ], 'w') as file:
            pass
        return result
    else:
        return input( "[]: " )

def input_system():
    if args[ 'systemfile' ]:
        # Read system file
        with open(args[ 'systemfile' ], 'r') as file:
            result = file.read()
        # sys.stdout.write( f"{ result }\n" )
        return result
    else:
        return "Your name is Sushi. You are Asher Vollmer's goofy little ragamuffin assistant. You are running in MacOS terminal."

## MAIN FUNC
openai.organization = "[YOUR ORGANIZATION]"
openai.api_key = "[YOUR API KEY]"
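
# A possible alternative (not part of the original gist): read credentials from the
# environment instead of hard-coding them, e.g.
#   openai.organization = os.environ.get( "OPENAI_ORGANIZATION", "" )
#   openai.api_key = os.environ[ "OPENAI_API_KEY" ]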

system_role = input_system()
print( system_role )

messages = [
    {"role": "system", "content": system_role }
]

if ( loadedFile ):
    print( loadedFile )
    messages.extend( loadedFile )
elif ( sourceText ):
    print( "Using source text: " + args[ 'document' ] )
    messages.append(
        {'role': 'user', 'name': 'SourceText', 'content': sourceText }
    )
topic = ""
message = input_message()
if not message:
    sys.exit()

topicResponse = openai.ChatCompletion.create(
    model='gpt-3.5-turbo',
    messages=[
        {"role": "system", "content": "You are a title generator." },
        {'role': 'user', 'content': f"Generate a title for the following message that is 5 words or less, camelcase, no punctuation: { message }" }
    ]
)
cleanTopic = re.sub( r'[^\w\s]', '', topicResponse[ 'choices' ][ 0 ][ 'message' ][ 'content' ] )
topic = f"{ cleanTopic }-{ datetime.datetime.now() }.txt"
print( f"\033[33mSaving To File: {topic}\033[0m" )
with open(f"Chats/{ topic }", 'w') as file:
# Write some lines to the file
file.write( "[\n" )
while True:
messageData = {'role': 'user', 'content': message }
messages.append( messageData )
file.write( f"\t{ str( messageData ) },\n" )
# tokens = num_tokens_from_messages( messages, args[ 'model' ] )
# print( f"Tokens: {tokens}" )
# # send a ChatCompletion request to count to 100
response = openai.ChatCompletion.create(
model=args[ 'model' ],
messages=messages,
temperature=args[ 'temperature' ],
stream=True
)
fullResponse = ""
# STREAMING
sys.stdout.write( f"\033[36mAI: \033[36m" )
for chunk in response:
delta = chunk['choices'][0]['delta']
if 'content' in delta:
sys.stdout.flush()
sys.stdout.write( f"{ delta['content'] }" )
fullResponse += delta['content']
if ( '|' in delta[ 'content' ] ):
sys.stdout.write( "\033[32m" )
# NON STREAMING
# sys.stdout.write( f"\033[36m{ response[ 'choices' ][ 0 ][ 'message' ][ 'content' ] }\033[0m" )
sys.stdout.write( '\033[0m\n' )
messageData = {'role': 'assistant', 'name': 'assistant', 'content': fullResponse }
messages.append( messageData )
file.write( f"\t{ str( messageData ) },\n" )
message = input_message()
if not message:
break
file.write( "]" )
print( "Session Complete" );