mojentic/llm/gateways/
openai_model_registry.rs

1//! OpenAI Model Registry for managing model-specific configurations and capabilities.
2//!
3//! This module provides infrastructure for categorizing OpenAI models and managing
4//! their specific parameter requirements and capabilities.
5
6use std::collections::HashMap;
7use std::sync::LazyLock;
8use tracing::warn;
9
/// Classification of OpenAI model types based on their capabilities and parameters.
///
/// The variant determines, among other things, which token-limit parameter name
/// applies (see [`ModelCapabilities::get_token_limit_param`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ModelType {
    /// Models like o1, o3 that use max_completion_tokens
    Reasoning,
    /// Standard chat models that use max_tokens
    Chat,
    /// Text embedding models
    Embedding,
    /// Content moderation models
    Moderation,
}
22
/// Defines the capabilities and parameter requirements for a model.
#[derive(Debug, Clone)]
pub struct ModelCapabilities {
    /// Broad classification; drives token-limit parameter selection.
    pub model_type: ModelType,
    /// Whether the model accepts tool/function-calling requests.
    pub supports_tools: bool,
    /// Whether the model supports streamed responses.
    pub supports_streaming: bool,
    /// Whether the model accepts image inputs.
    pub supports_vision: bool,
    /// Maximum input context window, if known. `None` = unknown/unspecified.
    pub max_context_tokens: Option<u32>,
    /// Maximum output tokens, if known. `None` = unknown/unspecified.
    pub max_output_tokens: Option<u32>,
    /// None means all temperatures supported, empty vec means no temperature parameter allowed
    pub supported_temperatures: Option<Vec<f32>>,
    /// Whether the model is served by the chat completions API.
    pub supports_chat_api: bool,
    /// Whether the model is served by the legacy completions API.
    pub supports_completions_api: bool,
    /// Whether the model is served by the responses API.
    pub supports_responses_api: bool,
}
38
39impl ModelCapabilities {
40    /// Get the correct parameter name for token limits based on model type.
41    pub fn get_token_limit_param(&self) -> &'static str {
42        if self.model_type == ModelType::Reasoning {
43            "max_completion_tokens"
44        } else {
45            "max_tokens"
46        }
47    }
48
49    /// Check if the model supports a specific temperature value.
50    pub fn supports_temperature(&self, temperature: f32) -> bool {
51        match &self.supported_temperatures {
52            None => true, // All temperatures supported if not restricted
53            Some(temps) if temps.is_empty() => false, // No temperature values supported
54            Some(temps) => temps.iter().any(|t| (*t - temperature).abs() < 0.01),
55        }
56    }
57}
58
59impl Default for ModelCapabilities {
60    fn default() -> Self {
61        Self {
62            model_type: ModelType::Chat,
63            supports_tools: true,
64            supports_streaming: true,
65            supports_vision: false,
66            max_context_tokens: None,
67            max_output_tokens: None,
68            supported_temperatures: None,
69            supports_chat_api: true,
70            supports_completions_api: false,
71            supports_responses_api: false,
72        }
73    }
74}
75
/// Registry for managing OpenAI model configurations and capabilities.
///
/// This struct provides a centralized way to manage model-specific configurations,
/// parameter mappings, and capabilities for OpenAI models.
pub struct OpenAIModelRegistry {
    /// Exact model name -> capabilities for explicitly registered models.
    models: HashMap<String, ModelCapabilities>,
    /// Substring pattern -> inferred type, used as a fallback for unknown models.
    pattern_mappings: HashMap<String, ModelType>,
}
84
impl OpenAIModelRegistry {
    /// Create a new model registry with default models.
    ///
    /// The returned registry is pre-populated with the built-in model catalog
    /// and fallback pattern mappings.
    pub fn new() -> Self {
        let mut this = Self {
            models: HashMap::new(),
            pattern_mappings: HashMap::new(),
        };
        this.initialize_default_models();
        this
    }
95
    /// Populate the registry with the built-in model catalog and fallback
    /// pattern mappings.
    ///
    /// The model facts (capability flags, token limits, temperature rules)
    /// come from a 2026-02-04 API audit and cannot be verified from this file
    /// alone; comments below record the audit's stated rationale.
    fn initialize_default_models(&mut self) {
        // Updated 2026-02-04 based on OpenAI API audit
        // Reasoning Models (o1, o3, o4, gpt-5 series)
        let reasoning_models = vec![
            "o1",
            "o1-2024-12-17",
            "o3",
            "o3-2025-04-16",
            "o3-mini",
            "o3-mini-2025-01-31",
            "o4-mini",
            "o4-mini-2025-04-16",
            "gpt-5",
            "gpt-5-2025-08-07",
            "gpt-5-mini",
            "gpt-5-mini-2025-08-07",
            "gpt-5-nano",
            "gpt-5-nano-2025-08-07",
            "gpt-5-pro",
            "gpt-5-pro-2025-10-06",
            "gpt-5.1",
            "gpt-5.1-2025-11-13",
            "gpt-5.1-chat-latest",
            "gpt-5.2",
            "gpt-5.2-2025-12-11",
            "gpt-5.2-chat-latest",
        ];

        for model in reasoning_models {
            // Base (undated) aliases lack tool support; dated snapshots have it.
            let is_gpt5_mini = model == "gpt-5-mini";
            let is_o4_mini = model == "o4-mini";
            let is_o_series =
                model.starts_with("o1") || model.starts_with("o3") || model.starts_with("o4");
            let is_gpt5_series = model.starts_with("gpt-5");
            let is_mini_or_nano = model.contains("mini") || model.contains("nano");

            // All reasoning models now support tools and streaming (audit 2026-02-04)
            // Exceptions: gpt-5-mini (base) and o4-mini (base) do not support tools
            let supports_tools = !is_gpt5_mini && !is_o4_mini;
            let supports_streaming = true;

            // Set context and output tokens based on model tier
            let (context_tokens, output_tokens) = if is_gpt5_series {
                if is_mini_or_nano {
                    (200000, 32768)
                } else {
                    (300000, 50000)
                }
            } else {
                (128000, 32768)
            };

            // Temperature restrictions based on model series (audit 2026-02-04)
            // o1 series: temperature=1.0 only
            // o3 series: temperature=1.0 only (was: no temperature)
            // o4 series: temperature=1.0 only
            // gpt-5 base/mini/nano/pro: temperature=1.0 only
            // gpt-5.1*: all temperatures
            // gpt-5.2*: all temperatures
            // The gpt-5.1/5.2 check must come first: those names also satisfy
            // the broader starts_with("gpt-5") check below.
            let supported_temps = if model.starts_with("gpt-5.1") || model.starts_with("gpt-5.2") {
                None // All temperatures supported
            } else if is_o_series || is_gpt5_series {
                Some(vec![1.0]) // Only temperature=1.0
            } else {
                None // Unreachable for the current list (all entries are o-* or gpt-5*); defensive default.
            };

            // Endpoint support flags
            // NOTE(review): no entry in the current list contains "deep-research"
            // or equals "gpt-5-codex"; those conditions only take effect if such
            // models are added later — confirm they are intentionally kept.
            let is_responses_only =
                model.contains("pro") || model.contains("deep-research") || model == "gpt-5-codex";
            let is_both_endpoint = model == "gpt-5.1" || model == "gpt-5.1-2025-11-13";

            self.models.insert(
                model.to_string(),
                ModelCapabilities {
                    model_type: ModelType::Reasoning,
                    supports_tools,
                    supports_streaming,
                    supports_vision: false,
                    max_context_tokens: Some(context_tokens),
                    max_output_tokens: Some(output_tokens),
                    supported_temperatures: supported_temps,
                    supports_chat_api: !is_responses_only,
                    supports_completions_api: is_both_endpoint,
                    supports_responses_api: is_responses_only,
                },
            );
        }

        // Chat Models (GPT-4, GPT-4.1, and GPT-5 chat series)
        let gpt4_and_newer_models = vec![
            "chatgpt-4o-latest",
            "gpt-4",
            "gpt-4-0125-preview",
            "gpt-4-0613",
            "gpt-4-1106-preview",
            "gpt-4-turbo",
            "gpt-4-turbo-2024-04-09",
            "gpt-4-turbo-preview",
            "gpt-4.1",
            "gpt-4.1-2025-04-14",
            "gpt-4.1-mini",
            "gpt-4.1-mini-2025-04-14",
            "gpt-4.1-nano",
            "gpt-4.1-nano-2025-04-14",
            "gpt-4o",
            "gpt-4o-2024-05-13",
            "gpt-4o-2024-08-06",
            "gpt-4o-2024-11-20",
            "gpt-4o-audio-preview",
            "gpt-4o-audio-preview-2024-12-17",
            "gpt-4o-audio-preview-2025-06-03",
            "gpt-4o-mini",
            "gpt-4o-mini-2024-07-18",
            "gpt-4o-mini-audio-preview",
            "gpt-4o-mini-audio-preview-2024-12-17",
            "gpt-4o-mini-search-preview",
            "gpt-4o-mini-search-preview-2025-03-11",
            "gpt-4o-search-preview",
            "gpt-4o-search-preview-2025-03-11",
            "gpt-5-chat-latest",
            "gpt-5-search-api",
            "gpt-5-search-api-2025-10-14",
        ];

        for model in gpt4_and_newer_models {
            // Audit 2026-02-04: Keep vision=true for gpt-4o (probe limitation, not real capability change)
            // Note: contains("gpt-4o") also matches "chatgpt-4o-latest", so it
            // is marked vision-capable too.
            let vision_support = model.contains("gpt-4o");
            let is_mini_or_nano = model.contains("mini") || model.contains("nano");
            let is_audio = model.contains("audio-preview");
            let is_search = model.contains("search");
            let is_gpt41 = model.contains("gpt-4.1");
            let is_gpt41_nano_base = model == "gpt-4.1-nano";

            // Audit 2026-02-04: chatgpt-4o-latest, gpt-4.1-nano (base only), audio models, and search models don't support tools
            let supports_tools =
                model != "chatgpt-4o-latest" && !is_gpt41_nano_base && !is_audio && !is_search;

            // Audio models don't support streaming (require audio modality)
            let supports_streaming = !is_audio;

            let (context_tokens, output_tokens) = if is_gpt41 {
                if is_mini_or_nano {
                    (128000, 16384)
                } else {
                    (200000, 32768)
                }
            } else if model.contains("gpt-4o") {
                (128000, 16384)
            } else if model.starts_with("gpt-5") {
                (300000, 50000)
            } else {
                // Remaining entries: gpt-4 / gpt-4-turbo family.
                (32000, 8192)
            };

            // Search models don't allow temperature parameter
            let supported_temps = if is_search { Some(vec![]) } else { None };

            // Endpoint support flags
            let is_both_endpoint = model == "gpt-4.1-nano"
                || model == "gpt-4.1-nano-2025-04-14"
                || model == "gpt-4o-mini"
                || model == "gpt-4o-mini-2024-07-18";

            self.models.insert(
                model.to_string(),
                ModelCapabilities {
                    model_type: ModelType::Chat,
                    supports_tools,
                    supports_streaming,
                    supports_vision: vision_support,
                    max_context_tokens: Some(context_tokens),
                    max_output_tokens: Some(output_tokens),
                    supported_temperatures: supported_temps,
                    supports_chat_api: true,
                    supports_completions_api: is_both_endpoint,
                    supports_responses_api: false,
                },
            );
        }

        // Chat Models (GPT-3.5 series)
        let gpt35_models = vec![
            "gpt-3.5-turbo",
            "gpt-3.5-turbo-0125",
            "gpt-3.5-turbo-1106",
            "gpt-3.5-turbo-16k",
            "gpt-3.5-turbo-instruct",
            "gpt-3.5-turbo-instruct-0914",
        ];

        for model in gpt35_models {
            // Instruct variants are completions-only: no chat API, tools, or streaming.
            let is_instruct = model.contains("instruct");

            self.models.insert(
                model.to_string(),
                ModelCapabilities {
                    model_type: ModelType::Chat,
                    supports_tools: !is_instruct,
                    supports_streaming: !is_instruct,
                    supports_vision: false,
                    max_context_tokens: Some(16385),
                    max_output_tokens: Some(4096),
                    supported_temperatures: None,
                    supports_chat_api: !is_instruct,
                    supports_completions_api: is_instruct,
                    supports_responses_api: false,
                },
            );
        }

        // Embedding Models
        let embedding_models = vec![
            "text-embedding-3-large",
            "text-embedding-3-small",
            "text-embedding-ada-002",
        ];

        for model in embedding_models {
            self.models.insert(
                model.to_string(),
                ModelCapabilities {
                    model_type: ModelType::Embedding,
                    supports_tools: false,
                    supports_streaming: false,
                    supports_vision: false,
                    max_context_tokens: None,
                    max_output_tokens: None,
                    supported_temperatures: None,
                    supports_chat_api: false,
                    supports_completions_api: false,
                    supports_responses_api: false,
                },
            );
        }

        // Legacy & Codex Models - completions-only and responses-only
        self.models.insert(
            "babbage-002".to_string(),
            ModelCapabilities {
                model_type: ModelType::Chat,
                supports_tools: false,
                supports_streaming: false,
                supports_vision: false,
                max_context_tokens: Some(16384),
                max_output_tokens: Some(4096),
                supported_temperatures: None,
                supports_chat_api: false,
                supports_completions_api: true,
                supports_responses_api: false,
            },
        );
        self.models.insert(
            "davinci-002".to_string(),
            ModelCapabilities {
                model_type: ModelType::Chat,
                supports_tools: false,
                supports_streaming: false,
                supports_vision: false,
                max_context_tokens: Some(16384),
                max_output_tokens: Some(4096),
                supported_temperatures: None,
                supports_chat_api: false,
                supports_completions_api: true,
                supports_responses_api: false,
            },
        );
        // NOTE(review): gpt-5.1-codex-mini is registered as completions-only
        // while codex-mini-latest is responses-only — verify this split against
        // the current API docs; it looks easy to get inverted.
        self.models.insert(
            "gpt-5.1-codex-mini".to_string(),
            ModelCapabilities {
                model_type: ModelType::Reasoning,
                supports_tools: false,
                supports_streaming: false,
                supports_vision: false,
                max_context_tokens: Some(200000),
                max_output_tokens: Some(32768),
                supported_temperatures: None,
                supports_chat_api: false,
                supports_completions_api: true,
                supports_responses_api: false,
            },
        );
        self.models.insert(
            "codex-mini-latest".to_string(),
            ModelCapabilities {
                model_type: ModelType::Reasoning,
                supports_tools: false,
                supports_streaming: false,
                supports_vision: false,
                max_context_tokens: Some(200000),
                max_output_tokens: Some(32768),
                supported_temperatures: None,
                supports_chat_api: false,
                supports_completions_api: false,
                supports_responses_api: true,
            },
        );

        // Pattern mappings for unknown models
        // These are matched by substring (`contains`) in get_model_capabilities;
        // overlapping patterns (e.g. "gpt-5.1" and "gpt-5") currently map to the
        // same ModelType, so overlap is harmless today.
        self.pattern_mappings.insert("o1".to_string(), ModelType::Reasoning);
        self.pattern_mappings.insert("o3".to_string(), ModelType::Reasoning);
        self.pattern_mappings.insert("o4".to_string(), ModelType::Reasoning);
        self.pattern_mappings.insert("gpt-5.2".to_string(), ModelType::Reasoning);
        self.pattern_mappings.insert("gpt-5.1".to_string(), ModelType::Reasoning);
        self.pattern_mappings.insert("gpt-5".to_string(), ModelType::Reasoning);
        self.pattern_mappings.insert("gpt-4".to_string(), ModelType::Chat);
        self.pattern_mappings.insert("gpt-4.1".to_string(), ModelType::Chat);
        self.pattern_mappings.insert("gpt-3.5".to_string(), ModelType::Chat);
        self.pattern_mappings.insert("chatgpt".to_string(), ModelType::Chat);
        self.pattern_mappings.insert("text-embedding".to_string(), ModelType::Embedding);
        self.pattern_mappings
            .insert("text-moderation".to_string(), ModelType::Moderation);
    }
409
410    /// Get the capabilities for a specific model.
411    pub fn get_model_capabilities(&self, model_name: &str) -> ModelCapabilities {
412        // Direct lookup first
413        if let Some(caps) = self.models.get(model_name) {
414            return caps.clone();
415        }
416
417        // Pattern matching for unknown models
418        let model_lower = model_name.to_lowercase();
419        for (pattern, model_type) in &self.pattern_mappings {
420            if model_lower.contains(pattern) {
421                warn!(
422                    model = model_name,
423                    pattern = pattern,
424                    inferred_type = ?model_type,
425                    "Using pattern matching for unknown model"
426                );
427                return self.get_default_capabilities_for_type(*model_type);
428            }
429        }
430
431        // Default to chat model if no pattern matches
432        warn!(model = model_name, "Unknown model, defaulting to chat model capabilities");
433        self.get_default_capabilities_for_type(ModelType::Chat)
434    }
435
436    fn get_default_capabilities_for_type(&self, model_type: ModelType) -> ModelCapabilities {
437        match model_type {
438            ModelType::Reasoning => ModelCapabilities {
439                model_type: ModelType::Reasoning,
440                supports_tools: false,
441                supports_streaming: false,
442                supports_vision: false,
443                max_context_tokens: None,
444                max_output_tokens: None,
445                supported_temperatures: None,
446                supports_chat_api: true,
447                supports_completions_api: false,
448                supports_responses_api: false,
449            },
450            ModelType::Chat => ModelCapabilities {
451                model_type: ModelType::Chat,
452                supports_tools: true,
453                supports_streaming: true,
454                supports_vision: false,
455                max_context_tokens: None,
456                max_output_tokens: None,
457                supported_temperatures: None,
458                supports_chat_api: true,
459                supports_completions_api: false,
460                supports_responses_api: false,
461            },
462            ModelType::Embedding => ModelCapabilities {
463                model_type: ModelType::Embedding,
464                supports_tools: false,
465                supports_streaming: false,
466                supports_vision: false,
467                max_context_tokens: None,
468                max_output_tokens: None,
469                supported_temperatures: None,
470                supports_chat_api: false,
471                supports_completions_api: false,
472                supports_responses_api: false,
473            },
474            ModelType::Moderation => ModelCapabilities {
475                model_type: ModelType::Moderation,
476                supports_tools: false,
477                supports_streaming: false,
478                supports_vision: false,
479                max_context_tokens: None,
480                max_output_tokens: None,
481                supported_temperatures: None,
482                supports_chat_api: false,
483                supports_completions_api: false,
484                supports_responses_api: false,
485            },
486        }
487    }
488
489    /// Check if a model is a reasoning model.
490    pub fn is_reasoning_model(&self, model_name: &str) -> bool {
491        let capabilities = self.get_model_capabilities(model_name);
492        capabilities.model_type == ModelType::Reasoning
493    }
494
495    /// Get a list of all explicitly registered models.
496    pub fn get_registered_models(&self) -> Vec<String> {
497        self.models.keys().cloned().collect()
498    }
499
500    /// Register a new model with its capabilities.
501    pub fn register_model(&mut self, model_name: &str, capabilities: ModelCapabilities) {
502        self.models.insert(model_name.to_string(), capabilities);
503    }
504
505    /// Register a pattern for inferring model types.
506    pub fn register_pattern(&mut self, pattern: &str, model_type: ModelType) {
507        self.pattern_mappings.insert(pattern.to_string(), model_type);
508    }
509}
510
511impl Default for OpenAIModelRegistry {
512    fn default() -> Self {
513        Self::new()
514    }
515}
516
/// Global registry instance.
///
/// Built lazily on first access; initialization is thread-safe via `LazyLock`.
pub static MODEL_REGISTRY: LazyLock<OpenAIModelRegistry> = LazyLock::new(OpenAIModelRegistry::new);

/// Get the global OpenAI model registry instance.
pub fn get_model_registry() -> &'static OpenAIModelRegistry {
    &MODEL_REGISTRY
}
524
525#[cfg(test)]
526mod tests {
527    use super::*;
528
529    #[test]
530    fn test_model_type_enum() {
531        assert_ne!(ModelType::Reasoning, ModelType::Chat);
532        assert_eq!(ModelType::Reasoning, ModelType::Reasoning);
533    }
534
535    #[test]
536    fn test_model_capabilities_default() {
537        let caps = ModelCapabilities::default();
538        assert_eq!(caps.model_type, ModelType::Chat);
539        assert!(caps.supports_tools);
540        assert!(caps.supports_streaming);
541        assert!(!caps.supports_vision);
542    }
543
544    #[test]
545    fn test_get_token_limit_param_reasoning() {
546        let caps = ModelCapabilities {
547            model_type: ModelType::Reasoning,
548            ..Default::default()
549        };
550        assert_eq!(caps.get_token_limit_param(), "max_completion_tokens");
551    }
552
553    #[test]
554    fn test_get_token_limit_param_chat() {
555        let caps = ModelCapabilities {
556            model_type: ModelType::Chat,
557            ..Default::default()
558        };
559        assert_eq!(caps.get_token_limit_param(), "max_tokens");
560    }
561
562    #[test]
563    fn test_supports_temperature_unrestricted() {
564        let caps = ModelCapabilities {
565            supported_temperatures: None,
566            ..Default::default()
567        };
568        assert!(caps.supports_temperature(0.5));
569        assert!(caps.supports_temperature(1.0));
570        assert!(caps.supports_temperature(0.0));
571    }
572
573    #[test]
574    fn test_supports_temperature_restricted() {
575        let caps = ModelCapabilities {
576            supported_temperatures: Some(vec![1.0]),
577            ..Default::default()
578        };
579        assert!(caps.supports_temperature(1.0));
580        assert!(!caps.supports_temperature(0.5));
581    }
582
583    #[test]
584    fn test_supports_temperature_none_allowed() {
585        let caps = ModelCapabilities {
586            supported_temperatures: Some(vec![]),
587            ..Default::default()
588        };
589        assert!(!caps.supports_temperature(1.0));
590        assert!(!caps.supports_temperature(0.5));
591    }
592
593    #[test]
594    fn test_registry_new() {
595        let registry = OpenAIModelRegistry::new();
596        assert!(!registry.models.is_empty());
597        assert!(!registry.pattern_mappings.is_empty());
598    }
599
600    #[test]
601    fn test_get_known_model_capabilities() {
602        let registry = OpenAIModelRegistry::new();
603        let caps = registry.get_model_capabilities("gpt-4");
604        assert_eq!(caps.model_type, ModelType::Chat);
605        assert!(caps.supports_tools);
606    }
607
608    #[test]
609    fn test_get_reasoning_model_capabilities() {
610        let registry = OpenAIModelRegistry::new();
611        let caps = registry.get_model_capabilities("o1");
612        assert_eq!(caps.model_type, ModelType::Reasoning);
613    }
614
615    #[test]
616    fn test_get_unknown_model_pattern_matching() {
617        let registry = OpenAIModelRegistry::new();
618        let caps = registry.get_model_capabilities("gpt-4-unknown-version");
619        assert_eq!(caps.model_type, ModelType::Chat);
620    }
621
622    #[test]
623    fn test_get_unknown_model_default() {
624        let registry = OpenAIModelRegistry::new();
625        let caps = registry.get_model_capabilities("completely-unknown-model");
626        assert_eq!(caps.model_type, ModelType::Chat);
627    }
628
629    #[test]
630    fn test_is_reasoning_model() {
631        let registry = OpenAIModelRegistry::new();
632        assert!(registry.is_reasoning_model("o1"));
633        assert!(registry.is_reasoning_model("o3-mini"));
634        assert!(!registry.is_reasoning_model("gpt-4"));
635    }
636
637    #[test]
638    fn test_get_registered_models() {
639        let registry = OpenAIModelRegistry::new();
640        let models = registry.get_registered_models();
641        assert!(models.contains(&"gpt-4".to_string()));
642        assert!(models.contains(&"o1".to_string()));
643    }
644
645    #[test]
646    fn test_register_model() {
647        let mut registry = OpenAIModelRegistry::new();
648        registry.register_model(
649            "custom-model",
650            ModelCapabilities {
651                model_type: ModelType::Chat,
652                supports_tools: false,
653                ..Default::default()
654            },
655        );
656        let caps = registry.get_model_capabilities("custom-model");
657        assert!(!caps.supports_tools);
658    }
659
660    #[test]
661    fn test_register_pattern() {
662        let mut registry = OpenAIModelRegistry::new();
663        registry.register_pattern("custom-pattern", ModelType::Embedding);
664        let caps = registry.get_model_capabilities("my-custom-pattern-model");
665        assert_eq!(caps.model_type, ModelType::Embedding);
666    }
667
668    #[test]
669    fn test_global_registry() {
670        let registry = get_model_registry();
671        let caps = registry.get_model_capabilities("gpt-4");
672        assert_eq!(caps.model_type, ModelType::Chat);
673    }
674
675    #[test]
676    fn test_embedding_models() {
677        let registry = OpenAIModelRegistry::new();
678        let caps = registry.get_model_capabilities("text-embedding-3-large");
679        assert_eq!(caps.model_type, ModelType::Embedding);
680        assert!(!caps.supports_tools);
681        assert!(!caps.supports_streaming);
682    }
683
684    #[test]
685    fn test_gpt35_instruct_no_tools() {
686        let registry = OpenAIModelRegistry::new();
687        let caps = registry.get_model_capabilities("gpt-3.5-turbo-instruct");
688        assert_eq!(caps.model_type, ModelType::Chat);
689        assert!(!caps.supports_tools);
690        assert!(!caps.supports_streaming);
691    }
692
693    // Tests for audit 2026-02-04 changes
694
695    #[test]
696    fn test_o1_supports_tools_and_streaming() {
697        let registry = OpenAIModelRegistry::new();
698        let caps = registry.get_model_capabilities("o1");
699        assert!(caps.supports_tools);
700        assert!(caps.supports_streaming);
701        assert_eq!(caps.supported_temperatures, Some(vec![1.0]));
702    }
703
704    #[test]
705    fn test_o3_supports_tools_and_streaming_and_temperature() {
706        let registry = OpenAIModelRegistry::new();
707        let caps = registry.get_model_capabilities("o3");
708        assert!(caps.supports_tools);
709        assert!(caps.supports_streaming);
710        assert_eq!(caps.supported_temperatures, Some(vec![1.0]));
711    }
712
713    #[test]
714    fn test_o3_mini_supports_tools_and_streaming() {
715        let registry = OpenAIModelRegistry::new();
716        let caps = registry.get_model_capabilities("o3-mini");
717        assert!(caps.supports_tools);
718        assert!(caps.supports_streaming);
719        assert_eq!(caps.supported_temperatures, Some(vec![1.0]));
720    }
721
722    #[test]
723    fn test_o4_mini_no_tools_but_supports_streaming() {
724        let registry = OpenAIModelRegistry::new();
725        let caps = registry.get_model_capabilities("o4-mini");
726        assert!(!caps.supports_tools);
727        assert!(caps.supports_streaming);
728    }
729
730    #[test]
731    fn test_o4_mini_dated_supports_tools() {
732        let registry = OpenAIModelRegistry::new();
733        let caps = registry.get_model_capabilities("o4-mini-2025-04-16");
734        assert!(caps.supports_tools);
735        assert!(caps.supports_streaming);
736    }
737
738    #[test]
739    fn test_chatgpt_4o_latest_no_tools() {
740        let registry = OpenAIModelRegistry::new();
741        let caps = registry.get_model_capabilities("chatgpt-4o-latest");
742        assert!(!caps.supports_tools);
743        assert!(caps.supports_streaming);
744    }
745
746    #[test]
747    fn test_gpt41_nano_base_no_tools() {
748        let registry = OpenAIModelRegistry::new();
749        let caps = registry.get_model_capabilities("gpt-4.1-nano");
750        assert!(!caps.supports_tools);
751    }
752
753    #[test]
754    fn test_gpt41_nano_dated_has_tools() {
755        let registry = OpenAIModelRegistry::new();
756        let caps = registry.get_model_capabilities("gpt-4.1-nano-2025-04-14");
757        assert!(caps.supports_tools);
758    }
759
760    #[test]
761    fn test_audio_preview_no_tools_no_streaming() {
762        let registry = OpenAIModelRegistry::new();
763        let caps = registry.get_model_capabilities("gpt-4o-audio-preview");
764        assert!(!caps.supports_tools);
765        assert!(!caps.supports_streaming);
766    }
767
768    #[test]
769    fn test_search_preview_no_tools_no_temperature() {
770        let registry = OpenAIModelRegistry::new();
771        let caps = registry.get_model_capabilities("gpt-4o-search-preview");
772        assert!(!caps.supports_tools);
773        assert!(caps.supports_streaming);
774        assert_eq!(caps.supported_temperatures, Some(vec![]));
775    }
776
777    #[test]
778    fn test_gpt5_chat_latest_is_chat_type() {
779        let registry = OpenAIModelRegistry::new();
780        let caps = registry.get_model_capabilities("gpt-5-chat-latest");
781        assert_eq!(caps.model_type, ModelType::Chat);
782        assert!(caps.supports_tools);
783        assert_eq!(caps.supported_temperatures, None);
784    }
785
786    #[test]
787    fn test_gpt5_mini_base_no_tools() {
788        let registry = OpenAIModelRegistry::new();
789        let caps = registry.get_model_capabilities("gpt-5-mini");
790        assert!(!caps.supports_tools);
791    }
792
793    #[test]
794    fn test_gpt5_mini_dated_has_tools() {
795        let registry = OpenAIModelRegistry::new();
796        let caps = registry.get_model_capabilities("gpt-5-mini-2025-08-07");
797        assert!(caps.supports_tools);
798    }
799
800    #[test]
801    fn test_gpt5_pro_exists() {
802        let registry = OpenAIModelRegistry::new();
803        let caps = registry.get_model_capabilities("gpt-5-pro");
804        assert_eq!(caps.model_type, ModelType::Reasoning);
805        assert!(caps.supports_tools);
806    }
807
808    #[test]
809    fn test_gpt5_search_api_no_tools_no_temperature() {
810        let registry = OpenAIModelRegistry::new();
811        let caps = registry.get_model_capabilities("gpt-5-search-api");
812        assert_eq!(caps.model_type, ModelType::Chat);
813        assert!(!caps.supports_tools);
814        assert_eq!(caps.supported_temperatures, Some(vec![]));
815    }
816
817    #[test]
818    fn test_gpt51_all_temperatures() {
819        let registry = OpenAIModelRegistry::new();
820        let caps = registry.get_model_capabilities("gpt-5.1");
821        assert_eq!(caps.model_type, ModelType::Reasoning);
822        assert_eq!(caps.supported_temperatures, None);
823    }
824
825    #[test]
826    fn test_gpt52_all_temperatures() {
827        let registry = OpenAIModelRegistry::new();
828        let caps = registry.get_model_capabilities("gpt-5.2");
829        assert_eq!(caps.model_type, ModelType::Reasoning);
830        assert_eq!(caps.supported_temperatures, None);
831    }
832
833    #[test]
834    fn test_gpt51_pattern_matching() {
835        let registry = OpenAIModelRegistry::new();
836        let caps = registry.get_model_capabilities("gpt-5.1-unknown");
837        assert_eq!(caps.model_type, ModelType::Reasoning);
838    }
839
840    #[test]
841    fn test_gpt52_pattern_matching() {
842        let registry = OpenAIModelRegistry::new();
843        let caps = registry.get_model_capabilities("gpt-5.2-unknown");
844        assert_eq!(caps.model_type, ModelType::Reasoning);
845    }
846
847    #[test]
848    fn test_deprecated_models_removed() {
849        let registry = OpenAIModelRegistry::new();
850        let models = registry.get_registered_models();
851
852        // Verify removed models are not in registry
853        assert!(!models.contains(&"o1-mini".to_string()));
854        assert!(!models.contains(&"o1-mini-2024-09-12".to_string()));
855        assert!(!models.contains(&"o1-pro".to_string()));
856        assert!(!models.contains(&"o3-pro".to_string()));
857        assert!(!models.contains(&"o3-deep-research".to_string()));
858        assert!(!models.contains(&"o4-mini-deep-research".to_string()));
859        assert!(!models.contains(&"gpt-4o-audio-preview-2024-10-01".to_string()));
860        assert!(!models.contains(&"gpt-5-codex".to_string()));
861    }
862
863    #[test]
864    fn test_chat_only_model_endpoint_flags() {
865        let registry = OpenAIModelRegistry::new();
866        let caps = registry.get_model_capabilities("gpt-4");
867        assert!(caps.supports_chat_api);
868        assert!(!caps.supports_completions_api);
869        assert!(!caps.supports_responses_api);
870    }
871
872    #[test]
873    fn test_both_endpoint_model_flags() {
874        let registry = OpenAIModelRegistry::new();
875        let caps = registry.get_model_capabilities("gpt-4o-mini");
876        assert!(caps.supports_chat_api);
877        assert!(caps.supports_completions_api);
878        assert!(!caps.supports_responses_api);
879    }
880
881    #[test]
882    fn test_completions_only_model_flags() {
883        let registry = OpenAIModelRegistry::new();
884        let caps = registry.get_model_capabilities("gpt-3.5-turbo-instruct");
885        assert!(!caps.supports_chat_api);
886        assert!(caps.supports_completions_api);
887        assert!(!caps.supports_responses_api);
888    }
889
890    #[test]
891    fn test_responses_only_model_flags() {
892        let registry = OpenAIModelRegistry::new();
893        let caps = registry.get_model_capabilities("gpt-5-pro");
894        assert!(!caps.supports_chat_api);
895        assert!(!caps.supports_completions_api);
896        assert!(caps.supports_responses_api);
897    }
898
899    #[test]
900    fn test_legacy_completions_model_flags() {
901        let registry = OpenAIModelRegistry::new();
902        let caps = registry.get_model_capabilities("babbage-002");
903        assert!(!caps.supports_chat_api);
904        assert!(caps.supports_completions_api);
905        assert!(!caps.supports_responses_api);
906    }
907
908    #[test]
909    fn test_embedding_model_endpoint_flags() {
910        let registry = OpenAIModelRegistry::new();
911        let caps = registry.get_model_capabilities("text-embedding-3-large");
912        assert!(!caps.supports_chat_api);
913        assert!(!caps.supports_completions_api);
914        assert!(!caps.supports_responses_api);
915    }
916
917    #[test]
918    fn test_codex_mini_latest_responses_only() {
919        let registry = OpenAIModelRegistry::new();
920        let caps = registry.get_model_capabilities("codex-mini-latest");
921        assert!(!caps.supports_chat_api);
922        assert!(!caps.supports_completions_api);
923        assert!(caps.supports_responses_api);
924    }
925
926    #[test]
927    fn test_gpt51_both_chat_and_completions() {
928        let registry = OpenAIModelRegistry::new();
929        let caps = registry.get_model_capabilities("gpt-5.1");
930        assert!(caps.supports_chat_api);
931        assert!(caps.supports_completions_api);
932        assert!(!caps.supports_responses_api);
933    }
934
935    #[test]
936    fn test_default_capabilities_include_endpoint_flags() {
937        let registry = OpenAIModelRegistry::new();
938        let caps = registry.get_model_capabilities("completely-unknown-model-xyz");
939        assert!(caps.supports_chat_api);
940        assert!(!caps.supports_completions_api);
941        assert!(!caps.supports_responses_api);
942    }
943}