classifier_maker_checker
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"from PIL import Image"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def maker(image):\n",
"    # Zero-shot classification with CLIP: score the image against each candidate description.\n",
"    model = CLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n",
"    processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-base-patch32\")\n",
"    descriptions = [\"A driver's license\", \"A passport\", \"A student ID\"]\n",
"\n",
"    inputs = processor(text=descriptions, images=image, return_tensors=\"pt\", padding=True)\n",
"    outputs = model(**inputs)\n",
"    logits_per_image = outputs.logits_per_image  # image-text similarity scores\n",
"    probs = logits_per_image.softmax(dim=1)  # normalize scores into probabilities\n",
"    predicted_class = descriptions[probs.argmax()]\n",
"    confidence = probs.max().item()\n",
"\n",
"    # Only return a label when CLIP is reasonably confident; otherwise signal no match.\n",
"    if confidence > .5:\n",
"        return predicted_class\n",
"    else:\n",
"        return None\n"
]
},
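{
"cell_type": "markdown",
"metadata": {},
"source": [
"`maker` reloads the CLIP weights from the Hugging Face hub on every call. The cell below is a minimal illustrative sketch, not part of the original gist: the names `CLIP_MODEL`, `CLIP_PROCESSOR`, and `classify_image` are assumptions. It loads the model once and lets the caller pass their own label list and confidence threshold."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical variant of maker(): load CLIP once and reuse it for every image.\n",
"import torch\n",
"from transformers import CLIPProcessor, CLIPModel\n",
"\n",
"CLIP_MODEL = CLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n",
"CLIP_PROCESSOR = CLIPProcessor.from_pretrained(\"openai/clip-vit-base-patch32\")\n",
"\n",
"def classify_image(image, descriptions=(\"A driver's license\", \"A passport\", \"A student ID\"), threshold=0.5):\n",
"    inputs = CLIP_PROCESSOR(text=list(descriptions), images=image, return_tensors=\"pt\", padding=True)\n",
"    with torch.no_grad():\n",
"        outputs = CLIP_MODEL(**inputs)\n",
"    probs = outputs.logits_per_image.softmax(dim=1)\n",
"    confidence = probs.max().item()\n",
"    # Return (label, confidence); label is None when CLIP is not confident enough.\n",
"    label = descriptions[probs.argmax().item()] if confidence > threshold else None\n",
"    return label, confidence"
]
},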
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import io\n",
"import json\n",
"import base64\n",
"from openai import AzureOpenAI\n",
"\n",
"def checker(image, classification):\n",
"    # Azure OpenAI connection details; the API key is read from an environment\n",
"    # variable rather than hard-coded in the notebook.\n",
"    endpoint = \"https://aiftrained1464353287.openai.azure.com/\"\n",
"    deployment = \"gpt-4o\"\n",
"    subscription_key = os.getenv(\"AZURE_OPENAI_API_KEY\")\n",
"\n",
"    client = AzureOpenAI(azure_endpoint=endpoint, api_key=subscription_key, api_version=\"2024-05-01-preview\")\n",
"\n",
"    # Encode the PIL image as a base64 PNG so it can be sent inline as a data URL.\n",
"    buffered = io.BytesIO()\n",
"    image.save(buffered, format=\"PNG\")\n",
"    encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')\n",
"\n",
"    # Ask the vision model to confirm (or reject) the classification produced by maker().\n",
"    chat_prompt = [\n",
"        {\n",
"            \"role\": \"system\",\n",
"            \"content\": [\n",
"                {\n",
"                    \"type\": \"text\",\n",
"                    \"text\": \"You are an AI assistant that helps classify fictional legal documents.\"\n",
"                }\n",
"            ]\n",
"        },\n",
"        {\n",
"            \"role\": \"user\",\n",
"            \"content\": [\n",
"                {\n",
"                    \"type\": \"text\",\n",
"                    \"text\": \"Is this document \" + classification + \"? \"\n",
"                },\n",
"                {\n",
"                    \"type\": \"image_url\",\n",
"                    \"image_url\": {\n",
"                        \"url\": f\"data:image/png;base64,{encoded_image}\"\n",
"                    }\n",
"                }\n",
"            ]\n",
"        }\n",
"    ]\n",
"\n",
"    messages = chat_prompt\n",
"    completion = client.chat.completions.create(model=deployment, messages=messages, max_tokens=800,\n",
"        temperature=0.7, top_p=0.95, frequency_penalty=0, presence_penalty=0, stop=None, stream=False)\n",
"\n",
"    # Extract the assistant's reply text from the completion.\n",
"    json_string = completion.to_json()\n",
"    data = json.loads(json_string)\n",
"    return data['choices'][0]['message']['content']\n"
]
},
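{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of wiring the two functions together. `maker_checker` is an illustrative name, not part of the original gist; it assumes the `maker` and `checker` cells above have been run and skips the checker call when `maker` is not confident enough to return a label."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative maker-checker wrapper; maker_checker() is an assumed helper, not part of the original gist.\n",
"def maker_checker(image):\n",
"    classification = maker(image)\n",
"    if classification is None:\n",
"        # CLIP was not confident enough, so there is nothing for the checker to verify.\n",
"        return {\"classification\": None, \"review\": None}\n",
"    review = checker(image, classification)\n",
"    return {\"classification\": classification, \"review\": review}"
]
},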
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"A driver's license\n",
"Based on the layout and elements such as the use of the word \"license,\" this document resembles a fictional driver's license, although the text and format appear distorted or nonsensical.\n"
]
}
],
"source": [
"input_folder = 'data_new'\n",
"input_filename = '0.png'\n",
"image_path = os.path.join(input_folder, input_filename)\n",
"image = Image.open(image_path)\n",
"\n",
"classification = maker(image)\n",
"print(classification)\n",
"\n",
"# maker() returns None when CLIP is not confident, so only ask the checker when there is a label.\n",
"if classification is not None:\n",
"    validation = checker(image, classification)\n",
"    print(validation)"
]
}
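,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a usage sketch only (not part of the original gist), the loop below runs the same maker/checker pass over every PNG in the `data_new` folder; the `results` dictionary and the filename filter are illustrative assumptions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative batch run; assumes the cells above (imports, maker, checker, input_folder) have been executed.\n",
"results = {}\n",
"for filename in sorted(os.listdir(input_folder)):\n",
"    if not filename.lower().endswith('.png'):\n",
"        continue\n",
"    img = Image.open(os.path.join(input_folder, filename))\n",
"    label = maker(img)\n",
"    results[filename] = {\n",
"        'classification': label,\n",
"        'review': checker(img, label) if label is not None else None,\n",
"    }\n",
"\n",
"for name, result in results.items():\n",
"    print(name, result['classification'])"
]
}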
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}