Created
December 4, 2025 08:48
-
-
Save RISCfuture/325bc4e32752bcef9487d6fd7b854bd9 to your computer and use it in GitHub Desktop.
TMAAT (“Tell Me About a Time”) answer generator using an LLM and your LogTen Pro for Mac logbook
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/usr/bin/env python3 | |
| """ | |
| TMAAT Interview Prep - Generate airline interview answers from your LogTen Pro flight logbook. | |
| Reads flight data from LogTen Pro SQLite database, identifies relevant stories using | |
| keyword matching, and uses OpenAI GPT to generate STAR-format interview responses. | |
| """ | |
| import sqlite3 | |
| from dataclasses import dataclass | |
| from datetime import datetime | |
| from pathlib import Path | |
| from typing import Optional | |
| import os | |
| from dotenv import load_dotenv | |
| from openai import OpenAI | |
# Load environment variables from a local .env file at import time, so
# OPENAI_API_KEY becomes visible to os.getenv() in TMAATGenerator.__init__.
load_dotenv()
# === CONFIGURATION ===
# Path to the LogTen Pro Core Data SQLite store on macOS.
# NOTE(review): the "LogTenProData_6583aa..." directory name looks
# installation-specific — confirm it matches your own machine before running.
DB_PATH: Path = Path.home() / "Library/Group Containers/group.com.coradine.LogTenPro/LogTenProData_6583aa561ec1cc91302449b5/LogTenCoreDataStore.sql"

# Core Data epoch offset (Jan 1, 2001 to Unix epoch Jan 1, 1970), in seconds.
# Added to ZFLIGHT_FLIGHTDATE in the SQL query to convert to Unix time.
CORE_DATA_EPOCH: int = 978307200

# TMAAT questions and their keyword filters.
# Each entry maps a short category key to the interview question text and a
# list of remark keywords; keywords are matched case-insensitively against
# flight remarks in score_flight_for_category().
TMAAT_QUESTIONS = {
    "emergency": {
        "question": "Tell me about a time you had an emergency or abnormal situation",
        "keywords": ["emergency", "failure", "malfunction", "abort", "divert", "diverted",
                     "icing", "ice", "fire", "smoke", "warning", "caution", "unsafe",
                     "gear", "flameout", "engine out", "electrical", "hydraulic",
                     "go-around", "missed approach", "CAPS", "declare"],
    },
    "conflict": {
        "question": "Tell me about a time you had a conflict with a crewmember",
        "keywords": ["disagree", "disagreed", "conflict", "objection", "argued", "argument",
                     "despite", "in spite of", "pushed back", "uncomfortable", "tension",
                     "told him", "told her", "different opinion", "overruled"],
    },
    "mistake": {
        "question": "Tell me about a time you made a mistake or failed",
        "keywords": ["mistake", "error", "forgot", "failed", "wrong", "incorrect",
                     "should have", "missed", "overlooked", "neglected", "screwed up",
                     "lesson", "learned", "realized", "busted", "deviation"],
    },
    "crm": {
        "question": "Tell me about a time CRM broke down",
        "keywords": ["CRM", "crew resource", "miscommunication", "misunderstood",
                     "briefing", "didn't brief", "sterile cockpit", "workload",
                     "task saturation", "distraction", "distracted", "behind the airplane",
                     "lost situational awareness", "heads down"],
    },
    "stress": {
        "question": "Tell me about a time you dealt with a stressful flight",
        "keywords": ["stress", "stressful", "challenging", "difficult", "demanding",
                     "workload", "pressure", "overwhelmed", "complex", "IMC",
                     "turbulence", "convection", "thunderstorm", "night", "icing",
                     "low visibility", "minimums", "gusty", "crosswind"],
    },
    "decision": {
        "question": "Tell me about a time you had to make a decision without all the information",
        "keywords": ["decision", "decided", "chose", "opted", "uncertain", "unknown",
                     "ambiguous", "unclear", "divert", "alternate", "turn back",
                     "continue", "cancel", "risk", "conservative", "judgment"],
    },
    "leadership": {
        "question": "Tell me about a time you demonstrated leadership",
        "keywords": ["lead", "leadership", "mentor", "mentored", "train", "trained",
                     "teach", "taught", "CFI", "instruction", "example", "initiative",
                     "organized", "coordinated", "team", "formation", "clinic",
                     "recommended", "checkride", "examiner"],
    },
    "policy": {
        "question": "Tell me about a time you went outside company policy for safety",
        "keywords": ["policy", "procedure", "SOP", "deviate", "deviation", "exception",
                     "judgment", "override", "safety", "refused", "decline", "declined",
                     "uncomfortable", "spoke up", "assertive"],
    },
}

# System prompt sent with every OpenAI request (both answer generation and
# follow-up chat). Establishes the STAR-format coaching persona and output rules.
SYSTEM_PROMPT = """You are an expert aviation interview coach helping a pilot prepare for airline interviews.
You specialize in crafting compelling STAR-format (Situation, Task, Action, Result) answers that highlight
professionalism, sound aeronautical decision-making, and crew resource management skills.
When generating interview answers:
1. Use the STAR format with clear section headers
2. Be specific with dates, aircraft types, airports, and conditions from the flight data
3. Keep responses to approximately 400-500 words (2-3 minutes speaking time)
4. Emphasize safety consciousness, teamwork, and learning
5. End with a clear lesson learned or positive outcome
6. Use first person and professional aviation terminology
7. Sound natural and conversational, not scripted
The pilot's flight data will be provided. Select the most compelling story for each question."""
@dataclass
class Flight:
    """Represents a single flight from the logbook."""

    pk: int  # primary key of the ZFLIGHT row
    flight_date: datetime
    from_icao: str
    from_name: str
    to_icao: str
    to_name: str
    aircraft_type: str
    total_time: float  # hours
    night_time: float  # hours
    actual_instrument: float  # hours
    go_arounds: int
    weather: Optional[str]  # free-text weather field, may be None
    remarks: str

    @staticmethod
    def _endpoint(icao: str, name: str) -> str:
        """Format one route endpoint.

        Fixes the original formatting, which printed empty parentheses
        (e.g. "KTRK ()") when only one endpoint had a name, and dropped
        both names whenever the departure name was missing.
        """
        if not icao:
            return "N/A"
        return f"{icao} ({name})" if name else icao

    def format_for_llm(self) -> str:
        """Format flight data as a text block for LLM context.

        Optional fields (night, actual IMC, go-arounds, weather) are only
        included when non-zero/non-empty to keep the prompt compact.
        """
        route = (f"{self._endpoint(self.from_icao, self.from_name)} → "
                 f"{self._endpoint(self.to_icao, self.to_name)}")
        lines = [
            f"Flight Date: {self.flight_date.strftime('%Y-%m-%d')}",
            f"Route: {route}",
            f"Aircraft: {self.aircraft_type or 'Unknown'}",
            f"Total Time: {self.total_time:.1f} hours",
        ]
        if self.night_time > 0:
            lines.append(f"Night Time: {self.night_time:.1f} hours")
        if self.actual_instrument > 0:
            lines.append(f"Actual IMC: {self.actual_instrument:.1f} hours")
        if self.go_arounds > 0:
            lines.append(f"Go-arounds: {self.go_arounds}")
        if self.weather:
            lines.append(f"Weather: {self.weather}")
        lines.append(f"\nRemarks:\n{self.remarks}")
        return "\n".join(lines)
class LogTenDB:
    """Read-only access to the LogTen Pro SQLite database.

    Usable either manually (connect()/close()) or as a context manager:

        with LogTenDB() as db:
            flights = db.get_all_flights_with_remarks()
    """

    def __init__(self, db_path: Path = DB_PATH):
        self.db_path = db_path
        self._conn: Optional[sqlite3.Connection] = None

    def connect(self) -> None:
        """Open read-only connection to database.

        Raises:
            FileNotFoundError: if the database file does not exist.
        """
        if not self.db_path.exists():
            raise FileNotFoundError(f"LogTen Pro database not found at: {self.db_path}")
        # mode=ro guarantees the logbook can never be modified by this tool.
        uri = f"file:{self.db_path}?mode=ro"
        self._conn = sqlite3.connect(uri, uri=True)
        # Row factory gives name-based column access in query results.
        self._conn.row_factory = sqlite3.Row

    def close(self) -> None:
        """Close database connection (safe to call when not connected)."""
        if self._conn:
            self._conn.close()
            self._conn = None

    def __enter__(self) -> "LogTenDB":
        """Context-manager entry: open the connection."""
        self.connect()
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        """Context-manager exit: always close the connection."""
        self.close()

    def get_all_flights_with_remarks(self) -> list[Flight]:
        """Fetch all flights that have remarks, newest first.

        Raises:
            RuntimeError: if called before connect() — previously this
            surfaced as a confusing AttributeError on None.
        """
        if self._conn is None:
            raise RuntimeError("Database is not connected; call connect() first.")
        # Times are stored in minutes (divide by 60), dates as seconds since
        # the Core Data epoch (offset to Unix epoch via the bound parameter).
        query = """
            SELECT
                f.Z_PK as pk,
                datetime(f.ZFLIGHT_FLIGHTDATE + ?, 'unixepoch') as flight_date,
                fp.ZPLACE_ICAOID as from_icao,
                fp.ZPLACE_NAME as from_name,
                tp.ZPLACE_ICAOID as to_icao,
                tp.ZPLACE_NAME as to_name,
                at.ZAIRCRAFTTYPE_TYPE as aircraft_type,
                COALESCE(f.ZFLIGHT_TOTALTIME, 0) / 60.0 as total_time,
                COALESCE(f.ZFLIGHT_NIGHT, 0) / 60.0 as night_time,
                COALESCE(f.ZFLIGHT_ACTUALINSTRUMENT, 0) / 60.0 as actual_instrument,
                COALESCE(f.ZFLIGHT_GOAROUNDS, 0) as go_arounds,
                f.ZFLIGHT_WEATHER as weather,
                f.ZFLIGHT_REMARKS as remarks
            FROM ZFLIGHT f
            LEFT JOIN ZPLACE fp ON f.ZFLIGHT_FROMPLACE = fp.Z_PK
            LEFT JOIN ZPLACE tp ON f.ZFLIGHT_TOPLACE = tp.Z_PK
            LEFT JOIN ZAIRCRAFTTYPE at ON f.ZFLIGHT_AIRCRAFTTYPE = at.Z_PK
            WHERE f.ZFLIGHT_REMARKS IS NOT NULL
              AND length(f.ZFLIGHT_REMARKS) > 0
            ORDER BY f.ZFLIGHT_FLIGHTDATE DESC
        """
        cursor = self._conn.execute(query, (CORE_DATA_EPOCH,))
        flights = []
        for row in cursor.fetchall():
            try:
                flight_date = datetime.strptime(row['flight_date'], '%Y-%m-%d %H:%M:%S')
            except (ValueError, TypeError):
                # Unparseable/NULL date: fall back to "now" so scoring still
                # works (the flight just looks recent).
                flight_date = datetime.now()
            flights.append(Flight(
                pk=row['pk'],
                flight_date=flight_date,
                from_icao=row['from_icao'] or '',
                from_name=row['from_name'] or '',
                to_icao=row['to_icao'] or '',
                to_name=row['to_name'] or '',
                aircraft_type=row['aircraft_type'] or '',
                total_time=row['total_time'] or 0,
                night_time=row['night_time'] or 0,
                actual_instrument=row['actual_instrument'] or 0,
                go_arounds=row['go_arounds'] or 0,
                weather=row['weather'],
                remarks=row['remarks'] or '',
            ))
        return flights
def score_flight_for_category(flight: Flight, category: dict) -> float:
    """Score a flight's relevance to a TMAAT category.

    Combines case-insensitive keyword hits in the remarks with bonuses for
    remark length, operational complexity, and recency. Higher is better;
    0.0 means the flight is irrelevant to this category.
    """
    remarks_lower = flight.remarks.lower()

    # Keyword hits: longer keywords are more specific, so they count double.
    score = sum(
        (2.0 if len(keyword) > 6 else 1.0
         for keyword in category["keywords"]
         if keyword.lower() in remarks_lower),
        0.0,
    )

    # Longer remarks usually mean more story content to work with.
    remark_length = len(flight.remarks)
    for threshold, bonus in ((800, 3.0), (400, 1.5), (200, 0.5)):
        if remark_length > threshold:
            score += bonus
            break

    # Operational complexity makes for good story material.
    if flight.go_arounds > 0:
        score += 2.0
    if flight.actual_instrument > 0:
        score += 1.0
    if flight.night_time > 0:
        score += 0.5

    # Recent flights are easier to recall in detail.
    age_days = (datetime.now() - flight.flight_date).days
    if age_days < 365:
        score += 1.0
    elif age_days < 730:
        score += 0.5

    return score
def find_top_stories(flights: list[Flight], category: dict, limit: int = 5) -> list[Flight]:
    """Return up to `limit` flights most relevant to a TMAAT category.

    Flights scoring 0 are excluded entirely; ties keep their original
    (newest-first) order because the sort is stable.
    """
    ranked = sorted(
        ((flight, score_flight_for_category(flight, category)) for flight in flights),
        key=lambda pair: pair[1],
        reverse=True,
    )
    # Zero scores sort to the bottom, so filtering after the slice is
    # equivalent to filtering before it.
    return [flight for flight, relevance in ranked[:limit] if relevance > 0]
class TMAATGenerator:
    """Generate STAR-format answers using the OpenAI API."""

    def __init__(self):
        """Create the OpenAI client.

        Raises:
            ValueError: if OPENAI_API_KEY is not set in the environment.
        """
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY not found in environment. Check your .env file.")
        self.client = OpenAI(api_key=api_key)
        self.model = "gpt-5.1"
        self.conversation_history = []  # chat-mode message dicts (user/assistant turns)
        self.flights_context = ""       # formatted logbook excerpt for follow-up chat
        self.generated_answers = {}     # question_key -> {question, answer, flights}

    def set_flights_context(self, flights: list[Flight]) -> None:
        """Store formatted flight context (capped at 100 flights) for chat mode."""
        self.flights_context = "\n\n---\n\n".join(f.format_for_llm() for f in flights[:100])

    def generate_answer(self, question_key: str, category: dict, top_flights: list[Flight]) -> str:
        """Generate a STAR-format answer for one TMAAT question.

        The answer is cached in self.generated_answers so chat mode can
        reference it later.
        """
        flights_text = "\n\n---\n\n".join(f.format_for_llm() for f in top_flights)
        user_prompt = f"""Based on the following flight experiences, generate a STAR-format answer for this interview question:
QUESTION: {category['question']}
CANDIDATE FLIGHTS:
{flights_text}
Select the most compelling story and generate a professional interview response. Include specific details from the flight data."""
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt}
            ],
            temperature=0.7,
            max_completion_tokens=1500
        )
        answer = response.choices[0].message.content
        self.generated_answers[question_key] = {
            "question": category['question'],
            "answer": answer,
            "flights": top_flights
        }
        return answer

    def chat(self, user_message: str, all_flights: list[Flight]) -> str:
        """Handle follow-up questions in chat mode.

        Conversation history is kept across calls so the model sees the
        whole follow-up thread.
        """
        # Build context from previously generated answers.
        answers_context = ""
        for key, data in self.generated_answers.items():
            answers_context += f"\n\n=== {key.upper()}: {data['question']} ===\n{data['answer']}"
        flights_summary = f"The pilot has {len(all_flights)} flights in their logbook."
        # Include the stored logbook context so the model can actually cite
        # specific flights — previously set_flights_context() stored this but
        # chat() never used it, despite asking the model to reference flight data.
        flights_section = f"\n\nFLIGHT DATA:\n{self.flights_context}" if self.flights_context else ""
        system_context = f"""{SYSTEM_PROMPT}
You have previously generated these TMAAT answers for the pilot:
{answers_context}
{flights_summary}{flights_section}
The pilot may ask you to:
- Regenerate an answer with different emphasis
- Provide an alternative story for a question
- Shorten or lengthen an answer
- Find other stories on a topic
- Clarify or expand on details
Be helpful and reference the specific flight data when relevant."""
        self.conversation_history.append({"role": "user", "content": user_message})
        messages = [{"role": "system", "content": system_context}] + self.conversation_history
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.7,
            # Was max_tokens, which newer OpenAI models reject; use the same
            # parameter as generate_answer() for consistency.
            max_completion_tokens=1500
        )
        assistant_message = response.choices[0].message.content
        self.conversation_history.append({"role": "assistant", "content": assistant_message})
        return assistant_message
def _generate_answers(generator, flights) -> None:
    """Generate and print a STAR answer for every TMAAT category."""
    print("Generating TMAAT Interview Answers")
    print("=" * 60)
    print()
    question_keys = list(TMAAT_QUESTIONS.keys())
    # Derive the total from the dict instead of hard-coding "8", so adding or
    # removing a category keeps the progress counter correct.
    total = len(question_keys)
    for i, key in enumerate(question_keys, 1):
        category = TMAAT_QUESTIONS[key]
        print(f"[{i}/{total}] {category['question']}...")
        # Find the strongest candidate stories for this category.
        top_flights = find_top_stories(flights, category, limit=5)
        if not top_flights:
            print("  No relevant stories found for this category.")
            continue
        answer = generator.generate_answer(key, category, top_flights)
        print()
        print("=" * 60)
        print(f"QUESTION {i}: {category['question']}")
        print("=" * 60)
        print()
        print(answer)
        print()
        # top_flights is guaranteed non-empty here (guarded by continue above).
        best = top_flights[0]
        print(f"Primary source: Flight on {best.flight_date.strftime('%Y-%m-%d')}, "
              f"{best.from_icao}→{best.to_icao} ({best.aircraft_type})")
        print()


def _chat_loop(generator, flights) -> None:
    """Interactive follow-up REPL; returns when the user quits or sends EOF."""
    print("=" * 60)
    print("FOLLOW-UP MODE")
    print("=" * 60)
    print()
    print("You can now ask follow-up questions about any of the stories,")
    print("request regeneration, or ask for variations.")
    print()
    print("Examples:")
    print("  - 'Give me an alternative story for the emergency question'")
    print("  - 'Make the leadership answer shorter'")
    print("  - 'What other stress-related stories do I have?'")
    print()
    print("Type 'quit' or 'exit' to end.")
    print()
    while True:
        try:
            user_input = input("> ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nGoodbye!")
            break
        if not user_input:
            continue
        if user_input.lower() in ('quit', 'exit', 'q'):
            print("\nGood luck with your interview!")
            break
        print()
        print(generator.chat(user_input, flights))
        print()


def main():
    """Main entry point: load the logbook, generate answers, then chat."""
    print("=" * 60)
    print("TMAAT Interview Prep")
    print("Generate airline interview answers from your flight logbook")
    print("=" * 60)
    print()
    print("Loading flight database...")
    db = LogTenDB()
    try:
        db.connect()
    except FileNotFoundError as e:
        print(f"Error: {e}")
        return
    try:
        flights = db.get_all_flights_with_remarks()
        print(f"Found {len(flights)} flights with remarks")
        print()
        print("Connecting to OpenAI API...")
        generator = TMAATGenerator()
        generator.set_flights_context(flights)
        print()
        _generate_answers(generator, flights)
        _chat_loop(generator, flights)
    finally:
        # Always release the database connection, even on API errors.
        db.close()


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment