Chainforge - OpenAI Compatible API Provider
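A custom provider script for ChainForge that routes prompts through the official `openai` Python client to any OpenAI-compatible endpoint. It exposes API key, base URL, temperature, and max-token settings in ChainForge's settings UI, and falls back to the legacy completions endpoint for models without chat-style names. A hedged usage sketch follows the script.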
from chainforge.providers import provider
import openai
from typing import Optional, Any, List, Dict

# Define ChatHistory type locally
ChatMessage = Dict[str, str]  # {"role": str, "content": str}
ChatHistory = List[ChatMessage]
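# Illustrative example (not part of the original gist): a ChatHistory is just
# a list of role/content dicts, e.g.
#   [{"role": "user", "content": "Hi"},
#    {"role": "assistant", "content": "Hello! How can I help?"}]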
# Schema for provider settings
OPENAI_COMPATIBLE_SETTINGS_SCHEMA = {
    "settings": {
        "api_key": {
            "type": "string",
            "title": "API Key",
            "description": "Your API key for the service"
        },
        "base_url": {
            "type": "string",
            "title": "Base URL",
            "description": "The base URL for the API (e.g., https://api.together.xyz/v1)",
            "default": "https://api.openai.com/v1"
        },
        "temperature": {
            "type": "number",
            "title": "Temperature",
            "description": "Controls randomness in the response",
            "default": 0.7,
            "minimum": 0,
            "maximum": 2.0,
            "multipleOf": 0.1
        },
        "max_tokens": {
            "type": "integer",
            "title": "Max Tokens",
            "description": "Maximum length of the response",
            "default": 1000,
            "minimum": 1,
            "maximum": 4096
        }
    },
    "ui": {
        "api_key": {
            "ui:widget": "password"
        },
        "temperature": {
            "ui:widget": "range",
            "ui:help": "Higher values make output more random, lower values more deterministic"
        },
        "max_tokens": {
            "ui:widget": "range",
            "ui:help": "Maximum number of tokens to generate"
        }
    }
}
@provider(
    name="OpenAI Compatible",
    emoji="🤝",
    models=[
        "gpt-3.5-turbo",            # OpenAI models
        "gpt-4",
        "claude-3-opus-20240229",   # Anthropic model IDs (usable via compatible proxies)
        "claude-3-sonnet-20240229",
        "mistral-7b-instruct",      # open-weight models (e.g., served by Together.ai)
        "mixtral-8x7b-instruct",
        "neural-chat-7b",
    ],
    rate_limit="sequential",
    settings_schema=OPENAI_COMPATIBLE_SETTINGS_SCHEMA
)
def openai_compatible_completion(
    prompt: str,
    model: Optional[str],
    chat_history: Optional[ChatHistory] = None,
    api_key: str = "",
    base_url: str = "https://api.openai.com/v1",
    temperature: float = 0.7,
    max_tokens: int = 1000,
    **kwargs: Any
) -> str:
    # Configure the client to point at the chosen OpenAI-compatible endpoint
    client = openai.OpenAI(
        api_key=api_key,
        base_url=base_url
    )
    try:
        # Prepare messages
        messages = []

        # Add chat history if provided
        if chat_history:
            for msg in chat_history:
                messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })

        # Add the current prompt
        messages.append({
            "role": "user",
            "content": prompt
        })

        # If there is no chat history and the model is not a chat-style model,
        # use the legacy completions endpoint (guarding against model being None)
        if not chat_history and model and not model.startswith(("gpt-", "claude-")):
            response = client.completions.create(
                model=model,
                prompt=prompt,
                temperature=temperature,
                max_tokens=max_tokens,
                **kwargs
            )
            return response.choices[0].text

        # Otherwise use the chat completions endpoint
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            **kwargs
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"