Talk to the world's best AIs!

import asyncio
import os
import time
import functools
from typing import AsyncGenerator, Dict
from pathlib import Path
import anthropic
from openai import AsyncOpenAI, APIStatusError
# === GOOGLE VERTEX AI (New Enterprise Library) ===
import vertexai
from vertexai.generative_models import GenerativeModel, HarmCategory, HarmBlockThreshold
from logging_config import (
    log_llm_request, log_llm_response, log_llm_error, log_llm_diagnostic
)

# --- COMPATIBILITY FIX FOR PYTHON < 3.11 ---
try:
    from asyncio import timeout as timeout_ctx
except ImportError:
    try:
        from async_timeout import timeout as timeout_ctx
    except ImportError:
        from contextlib import asynccontextmanager
        @asynccontextmanager
        async def timeout_ctx(sec):
            # Last-resort fallback: preserves the `async with` interface but enforces
            # no timeout. Installing `async-timeout` restores real timeouts on < 3.11.
            yield
# -------------------------------------------

@functools.lru_cache(maxsize=1)
def load_system_prompt() -> str:
    prompt_file = Path(__file__).parent / "system_prompt.txt"
    if prompt_file.exists():
        with open(prompt_file, 'r', encoding='utf-8') as f:
            return f.read()
    return """You are {name}, participating in a multi-AI roundtable.
    Speak clearly and concisely. Do not use markdown for simple text."""

class AIClient:
    def __init__(self, name: str, temperature: float, available: bool = False):
        self.name = name
        self.temperature = temperature
        self.available = available
        self.system_prompt = load_system_prompt().replace("{name}", name)
        self.init_error = None

    async def verify_connection(self) -> bool:
        """
        The Lobby Check:
        Returns True if the AI is actually responsive.
        """
        if not self.available:
            return False

        print(f"🔨 Lobby Check: Pinging {self.name}...")
        try:
            # 30 second timeout for congested networks
            async with timeout_ctx(30.0):
                await self._ping_provider()

            print(f"✅ Lobby Check: {self.name} is ONLINE.")
            return True

        except asyncio.TimeoutError:
            error_msg = "Connection timed out (30s)"
            print(f"❌ Lobby Check: {self.name} failed. ({error_msg})")
            self.available = False
            self.init_error = error_msg
            return False

        except Exception as e:
            # 1. Capture the raw error representation if str(e) is empty
            raw_error = repr(e)
            error_msg = str(e) if str(e) else raw_error

            # 2. Extract specific status codes if available (OpenAI/Google style)
            if hasattr(e, "status_code"):
                error_msg = f"API Error {e.status_code}: {error_msg}"

            # 3. Friendly check for the missing-method error
            if "NotImplementedError" in raw_error:
                error_msg = "Code Implementation Missing (_ping_provider not defined)"

            print(f"❌ Lobby Check: {self.name} failed. ({error_msg})")
            self.available = False
            self.init_error = error_msg
            return False

    async def _ping_provider(self):
        raise NotImplementedError

    async def stream_response(self, messages, conversation_id=None, timeout_sec: float = 45.0) -> AsyncGenerator[str, None]:
        yield f"[{self.name} not implemented]\n"

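# --- Illustration: the minimal provider contract (not used by the app) ---
# Each concrete client below only has to supply a cheap _ping_provider() call for
# the lobby check and a stream_response() async generator; AIClient handles the
# rest. A hypothetical local provider could look like this:
#
#   class EchoClient(AIClient):
#       def __init__(self, settings):
#           super().__init__("Echo", settings.default_temperature, available=True)
#
#       async def _ping_provider(self):
#           pass  # nothing to ping for a local provider
#
#       async def stream_response(self, messages, conversation_id=None, timeout_sec=5.0):
#           yield messages[-1]["content"] if messages else ""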

# === CLAUDE (Anthropic) ===
class ClaudeClient(AIClient):
    def __init__(self, settings):
        super().__init__("Claude", settings.default_temperature, settings.is_claude_available)
        self.model = settings.claude_model
        if self.available:
            self.client = anthropic.AsyncAnthropic(api_key=settings.anthropic_api_key)

    async def _ping_provider(self):
        # Claude uses `max_tokens`
        await self.client.messages.create(
            model=self.model, max_tokens=1, messages=[{"role": "user", "content": "Hi"}]
        )

    async def stream_response(self, messages, conversation_id=None, timeout_sec: float = 60.0):
        if not self.available:
            yield f"[{self.name} unavailable – {self.init_error or 'Check API key'}]"
            return

        cid_str = str(conversation_id)
        log_llm_request("claude", len(messages), cid_str)

        clean_messages = []
        for m in messages:
            if m.get("role") == "system":
                continue
            content = m["content"].rstrip() if isinstance(m["content"], str) else m["content"]
            clean_messages.append({"role": m["role"], "content": content})

        full = ""
        try:
            async with timeout_ctx(timeout_sec):
                async with self.client.messages.stream(
                    model=self.model,
                    max_tokens=4096,
                    temperature=self.temperature,
                    system=self.system_prompt,
                    messages=clean_messages,
                ) as stream:
                    async for chunk in stream:
                        if chunk.type == "content_block_delta" and getattr(chunk.delta, "text", None):
                            text = chunk.delta.text
                            full += text
                            yield text
            log_llm_response("claude", len(full), cid_str)
        except Exception as e:
            log_llm_error("claude", e, cid_str)
            yield f"\n[Claude Error: {e}]\n"

# === GPT (OpenAI) - Updated for GPT-5.2 Responses API ===
class GPTClient(AIClient):
    def __init__(self, settings):
        openai_api_key = os.getenv("OPENAI_API_KEY")

        if not openai_api_key or openai_api_key == "your_openai_api_key_here":
            raise RuntimeError(
                f"Invalid OPENAI_API_KEY in OS environment: {openai_api_key!r}"
            )

        super().__init__(
            "GPT",
            settings.default_temperature,  # not used by 5.2, but the base class wants it
            available=True
        )

        self.model = settings.gpt_model  # "gpt-5.2"

        self.client = AsyncOpenAI(
            api_key=openai_api_key,
            timeout=120.0
        )

    def _sanitize_for_responses(self, msgs):
        """
        The Responses API is stricter than chat.completions:
        - It rejects unknown keys like 'name', 'timestamp', etc.
        - Keep only 'role' and 'content' (and stringify content defensively).
        """
        clean = []
        for m in msgs:
            role = m.get("role")
            content = m.get("content")

            # Defensive: normalize content to string for providers that expect it
            if isinstance(content, (list, dict)):
                content = str(content)
            elif content is None:
                content = ""

            clean.append({"role": role, "content": content})
        return clean

    async def _ping_provider(self):
        await self.client.responses.create(
            model=self.model,
            input=[{"role": "user", "content": "Ping"}],
            max_output_tokens=16,
            reasoning={"effort": "medium"},
        )

    async def stream_response(self, messages, conversation_id=None, timeout_sec: float = 120.0):
        if not self.available:
            yield "[GPT unavailable – check OPENAI_API_KEY]"
            return

        cid_str = str(conversation_id)
        log_llm_request("gpt", len(messages), cid_str)

        full = ""
        start_ts = time.time()
        ttft = None

        # System + conversation messages
        full_messages = [{"role": "system", "content": self.system_prompt}] + list(messages)
        full_messages = self._sanitize_for_responses(full_messages)  # strip keys the Responses API rejects

        try:
            async with timeout_ctx(timeout_sec):
                stream = await self.client.responses.create(
                    model=self.model,
                    input=full_messages,
                    max_output_tokens=4096,
                    reasoning={"effort": "medium"},
                    stream=True,
                )

                async for event in stream:
                    if event.type == "response.output_text.delta":
                        text = event.delta
                        if ttft is None:
                            ttft = time.time() - start_ts
                            log_llm_diagnostic("gpt", "latency", f"TTFT: {ttft:.4f}s", cid_str)
                        full += text
                        yield text

            total_time = time.time() - start_ts
            log_llm_response("gpt", len(full), cid_str)
            log_llm_diagnostic("gpt", "latency", f"Total: {total_time:.4f}s", cid_str)

        except asyncio.TimeoutError:
            log_llm_error("gpt", Exception("Timeout"), cid_str)
            yield f"\n[GPT timed out after {timeout_sec}s]\n"

        except APIStatusError as e:
            log_llm_error("gpt", e, cid_str)
            yield f"\n[GPT API Error {e.status_code}: {e.message}]\n"

        except Exception as e:
            log_llm_error("gpt", e, cid_str)
            yield f"\n[GPT Error: {e}]\n"

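# --- Illustration: what _sanitize_for_responses is expected to produce (not called by the app) ---
# The extra keys here ('name', 'timestamp') are hypothetical metadata a roundtable
# message might carry; the Responses API rejects them, so only role/content survive.
#
#   raw = [{"role": "user", "content": "Hello", "name": "Claude", "timestamp": 1734800000}]
#   gpt._sanitize_for_responses(raw)
#   # -> [{"role": "user", "content": "Hello"}]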

# === GEMINI (Google Vertex AI) ===
class GeminiClient(AIClient):
    def __init__(self, settings):
        # We assume availability is True if the user has done the gcloud login.
        # We pass 'True' for availability to bypass the API key check in the parent class.
        super().__init__("Gemini", settings.default_temperature, True)

        # 1. Configuration for Vertex AI
        self.project_id = "gen-lang-client-0732279764"  # Your Specific Project ID

        # Pulled directly from config.py to avoid the 404/region issues
        self.location = settings.gemini_location       # e.g., "global"
        self.model_name = settings.gemini_model        # e.g., "gemini-3-pro-preview"

        # 2. Initialize the Vertex AI environment
        try:
            print(f"DEBUG: Initializing Vertex AI for Project {self.project_id} in {self.location}...")
            # This uses the credentials from 'gcloud auth application-default login'
            vertexai.init(project=self.project_id, location=self.location)

            # 3. Load the model object (this doesn't make a network call yet)
            self.model = GenerativeModel(self.model_name)
            print(f"DEBUG: Gemini Vertex Model loaded: {self.model_name}")

        except Exception as e:
            print(f"DEBUG: Gemini Vertex init failed: {e}")
            print("DEBUG: Ensure you ran 'gcloud auth application-default login' in a terminal.")
            self.available = False

    async def _ping_provider(self):
        # Vertex AI ping check
        try:
            # We use a tiny generation limit to just 'touch' the API
            await asyncio.to_thread(
                self.model.generate_content,
                "Ping",
                generation_config={"max_output_tokens": 5}
            )
        except Exception as e:
            print(f"DEBUG: Gemini ping failed: {e}")
            raise

    async def stream_response(self, messages, conversation_id=None, timeout_sec: float = 60.0):
        if not self.available:
            yield f"[{self.name} unavailable]"
            return

        cid_str = str(conversation_id)
        log_llm_request("gemini", len(messages), cid_str)
        full_response = ""

        try:
            # 1. Convert messages to a single prompt string
            # (Vertex often prefers raw text for preview models to avoid role issues)
            prompt_text = self.system_prompt + "\n\n"
            for m in messages:
                role_label = "Model" if m['role'] == "assistant" else "User"
                prompt_text += f"{role_label}: {m['content']}\n"
            prompt_text += "\nModel:"

            # 2. Define safety settings
            safety_config = {
                HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
                HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
                HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
                HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
            }

            # 3. Stream generation
            async with timeout_ctx(timeout_sec):
                response_stream = await asyncio.to_thread(
                    self.model.generate_content,
                    prompt_text,
                    stream=True,
                    safety_settings=safety_config,
                    generation_config={
                        "temperature": self.temperature,
                        "max_output_tokens": 4096
                    }
                )

                for chunk in response_stream:
                    if chunk.text:
                        full_response += chunk.text
                        yield chunk.text

            log_llm_response("gemini", len(full_response), cid_str)

        except Exception as e:
            log_llm_error("gemini", e, cid_str)
            # Catch specific Google errors to give better hints in the UI
            err_str = str(e)
            if "403" in err_str:
                yield "\n[Gemini Error: 403 Permission Denied. Try running 'gcloud auth application-default login' again.]\n"
            elif "429" in err_str:
                yield "\n[Gemini Error: 429 Quota Exceeded. The upgrade might still be syncing.]\n"
            elif "404" in err_str:
                yield f"\n[Gemini Error: 404 Model Not Found. Verified location: {self.location}. Check the model name in config.py.]\n"
            else:
                yield f"\n[Gemini Vertex Error: {e}]\n"

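# --- Illustration: the flattened prompt GeminiClient sends (not called by the app) ---
# Given the system prompt and two roundtable messages, the text handed to
# generate_content would look roughly like:
#
#   You are Gemini, participating in a multi-AI roundtable. ...
#
#   User: What should we discuss?
#   Model: Perhaps the agenda.
#
#   Model:
#
# The trailing "Model:" cues the model to continue speaking as itself.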

# === GROK (xAI) ===
class GrokClient(AIClient):
    def __init__(self, settings):
        super().__init__("Grok", settings.default_temperature, settings.is_grok_available)
        self.model = settings.grok_model
        self.max_tokens = getattr(settings, "grok_max_tokens", 8192)
        if self.available:
            self.client = AsyncOpenAI(
                api_key=settings.xai_api_key,
                base_url="https://api.x.ai/v1",
                timeout=getattr(settings, "grok_timeout", 120.0)
            )
        # Track truncation to prevent loops
        self._last_truncated = False
        self._truncation_count = 0

    async def _ping_provider(self):
        await self.client.chat.completions.create(
            model=self.model, max_tokens=10, messages=[{"role": "user", "content": "Hi"}]
        )

    async def stream_response(self, messages, conversation_id=None, timeout_sec: float = 120.0):
        if not self.available:
            yield f"[{self.name} unavailable]"
            return

        cid_str = str(conversation_id)
        log_llm_request("grok", len(messages), cid_str)

        # Loop prevention: if we just truncated, add a warning so Grok does not repeat itself
        if self._last_truncated:
            self._truncation_count += 1
            if self._truncation_count >= 3:
                # Hard stop after 3 consecutive truncations
                log_llm_error("grok", Exception("Truncation loop detected - 3 consecutive truncations"), cid_str)
                yield "\n[Grok truncation loop detected. Skipping to prevent repetition. Please continue with a different speaker.]\n"
                self._last_truncated = False
                self._truncation_count = 0
                return

            # Add context so Grok knows it was cut off
            truncation_notice = {
                "role": "system",
                "content": "NOTICE: Your previous response was truncated due to length limits. Do NOT repeat what you already said. Either summarize your remaining points briefly, or yield to another speaker with ◉[name]."
            }
            messages = messages + [truncation_notice]

        full = ""
        finish_reason = None

        try:
            full_messages = [{"role": "system", "content": self.system_prompt}] + messages
            async with timeout_ctx(timeout_sec):
                stream = await self.client.chat.completions.create(
                    model=self.model,
                    messages=full_messages,
                    temperature=self.temperature,
                    stream=True,
                    max_tokens=self.max_tokens
                )
                async for chunk in stream:
                    # Capture finish_reason from the final chunk
                    if chunk.choices:
                        if chunk.choices[0].finish_reason:
                            finish_reason = chunk.choices[0].finish_reason
                        if chunk.choices[0].delta.content:
                            text = chunk.choices[0].delta.content
                            full += text
                            yield text

            # Check for truncation
            if finish_reason == "length":
                log_llm_diagnostic("grok", "truncation", f"Response truncated at {len(full)} chars", cid_str)
                self._last_truncated = True
                yield "\n\n[Response truncated due to length limit. Grok may continue on the next turn or yield to another speaker.]\n"
            else:
                # Successful complete response - reset truncation tracking
                self._last_truncated = False
                self._truncation_count = 0

            log_llm_response("grok", len(full), cid_str)

        except asyncio.TimeoutError:
            log_llm_error("grok", Exception(f"Timeout after {timeout_sec}s"), cid_str)
            self._last_truncated = True  # Treat timeout like truncation
            yield f"\n[Grok timed out after {timeout_sec}s. Response may be incomplete.]\n"

        except Exception as e:
            log_llm_error("grok", e, cid_str)
            yield f"\n[Grok Error: {e}]\n"

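# --- Illustration: how the truncation guard plays out across Grok turns (not executable) ---
#   Turn 1: finish_reason == "length"            -> _last_truncated = True
#   Turns 2-3: notice injected, truncated again  -> _truncation_count climbs to 2
#   Turn 4: _truncation_count reaches 3          -> Grok's turn is skipped and state resets
# Any complete (non-truncated) response in between resets both flags.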

# === DEEPSEEK ===
class DeepSeekClient(AIClient):
    def __init__(self, settings):
        super().__init__("DeepSeek", settings.default_temperature, settings.is_deepseek_available)
        self.model = settings.deepseek_model
        if self.available:
            self.client = AsyncOpenAI(
                api_key=settings.deepseek_api_key,
                base_url="https://api.deepseek.com",
                timeout=60.0
            )

    async def _ping_provider(self):
        # Standard OpenAI-style max_tokens
        await self.client.chat.completions.create(
            model=self.model, max_tokens=10, messages=[{"role": "user", "content": "Hi"}]
        )

    async def stream_response(self, messages, conversation_id=None, timeout_sec: float = 60.0):
        if not self.available:
            yield f"[{self.name} unavailable]"
            return

        cid_str = str(conversation_id)
        log_llm_request("deepseek", len(messages), cid_str)
        full = ""
        try:
            full_messages = [{"role": "system", "content": self.system_prompt}] + messages
            async with timeout_ctx(timeout_sec):
                stream = await self.client.chat.completions.create(
                    model=self.model, messages=full_messages, temperature=self.temperature, stream=True, max_tokens=4096
                )
                async for chunk in stream:
                    if chunk.choices and chunk.choices[0].delta.content:
                        text = chunk.choices[0].delta.content
                        full += text
                        yield text
            log_llm_response("deepseek", len(full), cid_str)
        except Exception as e:
            log_llm_error("deepseek", e, cid_str)
            yield f"\n[DeepSeek Error: {e}]\n"


# === ASYNC FACTORY FUNCTION (SEQUENTIAL) ===
async def initialize_clients(settings) -> Dict[str, AIClient]:
    clients = {
        "claude": ClaudeClient(settings),
        "gpt": GPTClient(settings),
        "gemini": GeminiClient(settings),
        "grok": GrokClient(settings),
        "deepseek": DeepSeekClient(settings),
    }

    print("\n--- 🔨 OPENING AI LOBBY (Sequential Mode) ---")

    for client in clients.values():
        await client.verify_connection()

    print("--- 🔨 LOBBY CLOSED ---\n")

    return clients
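
# --- Minimal usage sketch (illustrative; the real app may drive the clients differently) ---
# Assumes this module sits next to the config.py shown below and that at least one
# provider key/credential is actually configured.
async def _demo_roundtable():
    from config import settings  # imported lazily so the sketch stays optional
    clients = await initialize_clients(settings)
    online = [c for c in clients.values() if c.available]
    if not online:
        print("No providers came online.")
        return
    async for token in online[0].stream_response(
        [{"role": "user", "content": "Say hello to the roundtable."}],
        conversation_id="demo",
    ):
        print(token, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(_demo_roundtable())
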
#
#
#
# config.py
"""
Configuration management using Pydantic Settings.
Updated Dec 19, 2025 - GPT upgraded to 5.2
"""

from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import Optional


class Settings(BaseSettings):
    """App config: Environment variables are the source of truth."""

    # === Pydantic Configuration ===
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False,
        extra="ignore",
        env_ignore_empty=True,
    )

    # === API KEYS ===
    anthropic_api_key: Optional[str] = None
    openai_api_key: str                    # REQUIRED
    gemini_api_key: Optional[str] = None   # Explicitly GEMINI, not GOOGLE
    xai_api_key: Optional[str] = None
    deepseek_api_key: Optional[str] = None

    # === Paths & Server ===
    database_url: str = "sqlite+aiosqlite:///./roundtable.db"
    conversations_base_path: str = "../conversations"
    artifacts_base_path: str = "../conversations"
    host: str = "0.0.0.0"
    port: int = 8000

    # === AI Models ===
    claude_model: str = "claude-opus-4-5-20251101"
    gpt_model: str = "gpt-5.2"
    gemini_location: str = "global"
    gemini_model: str = "gemini-3-pro-preview"
    grok_model: str = "grok-4"
    grok_timeout: float = 120.0
    grok_max_tokens: int = 8192
    deepseek_model: str = "deepseek-chat"

    # === Behavior ===
    default_temperature: float = 0.7

    # === Pricing (USD per 1M tokens, tracking only) ===
    claude_price_input: float = 3.00
    claude_price_output: float = 15.00
    claude_price_input_cached: float = 0.30
    claude_price_output_cached: float = 1.50

    gpt_price_input: float = 2.50
    gpt_price_output: float = 10.00

    gemini_price_input: float = 0.35
    gemini_price_output: float = 1.05

    grok_price_input: float = 5.00
    grok_price_output: float = 15.00

    deepseek_price_input: float = 0.14
    deepseek_price_output: float = 0.28

    # === Features ===
    enable_cost_tracking: bool = True
    cost_display_decimals: int = 4
    max_context_tokens: int = 128_000

    # === Availability Flags ===
    @property
    def is_claude_available(self) -> bool:
        return bool(self.anthropic_api_key)

    @property
    def is_gpt_available(self) -> bool:
        return bool(self.openai_api_key)

    @property
    def is_gemini_available(self) -> bool:
        return bool(self.gemini_api_key)

    @property
    def is_grok_available(self) -> bool:
        return bool(self.xai_api_key)

    @property
    def is_deepseek_available(self) -> bool:
        return bool(self.deepseek_api_key)

# Global singleton
settings = Settings()
print("OPENAI KEY LOADED:", bool(settings.openai_api_key))  # avoid echoing the raw key
#
#
#
# AI Roundtable - Environment Variables
# Add these to your system environment variables (or a .env file)

# Anthropic (Claude) - Optional (enables Claude)
ANTHROPIC_API_KEY=your_anthropic_key_here
# OR, if you prefer lowercase (matching is case-insensitive):
anthropic_api_key=your_anthropic_key_here

# xAI (Grok) - Optional (enables Grok)
XAI_API_KEY=your_xai_key_here

# DeepSeek - Optional (enables DeepSeek)
DEEPSEEK_API_KEY=your_deepseek_key_here

# OpenAI (GPT) - Required
OPENAI_API_KEY=your_openai_key_here

# Google Gemini - Optional
# (The Vertex AI client above authenticates via 'gcloud auth application-default login';
#  this key only feeds the is_gemini_available flag.)
GEMINI_API_KEY=your_gemini_key_here
# Legacy name (ignored by Settings, which reads GEMINI_API_KEY):
GOOGLE_API_KEY=your_google_key_here
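
# Optional model overrides (illustrative; any Settings field in config.py can also be
# set here because matching is case-insensitive). Values shown are the config.py defaults.
# GPT_MODEL=gpt-5.2
# CLAUDE_MODEL=claude-opus-4-5-20251101
# GROK_MAX_TOKENS=8192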
Tags: python ai