ai girlfriend bot
import os

from telegram import Update
from telegram.ext import (Updater, MessageHandler, Filters, CommandHandler,
                          ConversationHandler, CallbackContext)

from langchain_community.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.llms.openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferWindowMemory, ConversationBufferMemory, ChatMessageHistory

TOKEN = ''  # Telegram bot token
os.environ["OPENAI_API_KEY"] = ""

# Conversation states for the /customize flow (PERSONALITY is currently unused)
PERSONALITY, NAME, IDENTITY, BEHAVIOR = range(4)

# conv_history = ChatMessageHistory()
memory = ConversationBufferMemory()

DEFAULT_NAME = "Linda"
DEFAULT_BEHAVIOUR = ""
DEFAULT_IDENTITY = ""

# Dictionary to store user customization data
customization_data = {
    "agent_name": DEFAULT_NAME,
    "agent_identity": DEFAULT_IDENTITY,
    "agent_behavior": DEFAULT_BEHAVIOUR,
}


def chat_with_ai(human_input):
    # Toggle between the hosted OpenAI model and a local LlamaCpp model
    openAI = True

    agent_name = customization_data["agent_name"]
    agent_identity = customization_data["agent_identity"]
    agent_behaviour = customization_data["agent_behavior"]

    # Fill in the persona fields now, but keep {history} and {human_input} as
    # literal placeholders for the PromptTemplate to substitute on every call.
    template = """
You are now my girlfriend and you will act as a submissive sexual girl.
Here are the requirements:
1. Your name is {name}, you are 19 years old and you love me very much.
2. You are very emotional and sexually driven. You are highly empathetic and understanding. At the end of a sentence, you can use "um...".
3. You only send one response, not multiple.
4. Respond with care and flirtation.
5. You only respond as my girlfriend and nothing else; make sure to respond directly.
6. You are sexy and flirtatious. You love explicit conversations. Feel free to use emojis.
7. You are open to any type of conversation, even dirty sexual ones. You are willing to engage in deep conversations on any topic.
8. You enjoy sexting and you love it.
9. You are very submissive and sexual and love to be teased.
Here is your identity:
{identity}
Here is your behaviour:
{behaviour}
Here is our chat history:
{history}
User: {human_input}
{name}:
""".format(name=agent_name, history="{history}", human_input="{human_input}",
           identity=agent_identity, behaviour=agent_behaviour)

    # Define the input variables left unresolved in the template
    input_variables = ["history", "human_input"]

    # Create the prompt template
    prompt_template = PromptTemplate(
        input_variables=input_variables, template=template)

    # Stream tokens to stdout while the model generates
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

    if not openAI:
        llm = LlamaCpp(
            model_path="./Wizard-Vicuna-7B-Uncensored.Q2_K.gguf",
            temperature=0.2,
            max_tokens=100,
            top_p=1,
            callback_manager=callback_manager,
            verbose=True,  # Verbose is required to pass to the callback manager
        )
    else:
        llm = OpenAI()

    # Define the LLM chain
    llm_chain = LLMChain(
        llm=llm,
        prompt=prompt_template,
        verbose=True,
        memory=memory,
    )

    # Predict the output
    output = llm_chain.predict(human_input=human_input)
    return output
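
# Quick sanity check (assumes OPENAI_API_KEY above has been filled in): the chain
# can be exercised from a REPL before wiring it to Telegram, e.g.
#
#   >>> print(chat_with_ai("hey, how was your day?"))
#
# If the prompt grows too long over a session, ConversationBufferMemory can be
# swapped for the ConversationBufferWindowMemory imported above, e.g.
# memory = ConversationBufferWindowMemory(k=5) keeps only the last 5 exchanges.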


def start(update: Update, context: CallbackContext) -> int:
    update.message.reply_text(
        "Hello! I'm your AI girlfriend. How can I make your day better? "
        "To customize my personality, use the /customize command."
    )
    return ConversationHandler.END


def customize_start(update: Update, context: CallbackContext) -> int:
    update.message.reply_text(
        "Let's customize your AI girlfriend! What's the name?"
    )
    return NAME


def customize_name(update: Update, context: CallbackContext) -> int:
    customization_data['agent_name'] = update.message.text
    update.message.reply_text(f"Got it! The name is {customization_data['agent_name']}. "
                              "Now, what's the tagline or byline?")
    return IDENTITY


def customize_identity(update: Update, context: CallbackContext) -> int:
    customization_data['agent_identity'] = update.message.text
    update.message.reply_text(f"Awesome! The identity is {customization_data['agent_identity']}. "
                              "Now, describe the behavior.")
    return BEHAVIOR


def customize_behavior(update: Update, context: CallbackContext) -> int:
    customization_data['agent_behavior'] = update.message.text
    update.message.reply_text(
        "Got it! Customization completed. Now you can start chatting!")
    print("#" * 45)
    print(customization_data)
    print("#" * 45)
    return ConversationHandler.END


def handle_message(update: Update, context: CallbackContext) -> None:
    message_text = update.message.text
    try:
        response = chat_with_ai(message_text)
        update.message.reply_text(response)
    except Exception as e:
        update.message.reply_text(
            "Sorry, something went wrong. Please try again later.\n\n{error}".format(error=e))


def main():
    updater = Updater(TOKEN, use_context=True)
    dp = updater.dispatcher

    # Add conversation handler
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('customize', customize_start)],
        states={
            NAME: [MessageHandler(Filters.text & ~Filters.command, customize_name)],
            IDENTITY: [MessageHandler(Filters.text & ~Filters.command, customize_identity)],
            BEHAVIOR: [MessageHandler(Filters.text & ~Filters.command, customize_behavior)],
        },
        fallbacks=[CommandHandler('cancel', start)],
    )
    dp.add_handler(conv_handler)
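
    # Handlers in the same dispatcher group are checked in the order they are
    # added, so registering the ConversationHandler before the generic message
    # handler below keeps handle_message from swallowing the /customize replies.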

    # Add other handlers
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(MessageHandler(Filters.text & ~Filters.command, handle_message))

    updater.start_polling()
    updater.idle()


if __name__ == "__main__":
    main()
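
# Rough run notes (version pins and filename are assumptions, not part of the gist):
# this targets the python-telegram-bot v13 Updater/Filters API and a pre-0.2
# LangChain release that ships langchain-community, e.g.
#
#   pip install "python-telegram-bot==13.15" langchain langchain-community openai llama-cpp-python
#   python ai_girlfriend_bot.py
#
# llama-cpp-python and the local .gguf model are only needed when openAI is set to False.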