{
  "tools": [
    {
      "id": "gemini-2-5-flash-lite",
      "name": "Gemini 2.5 Flash Lite",
      "provider": "Google",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.0375,
        "output": 0.15
      },
      "description": "The most affordable Gemini model. Ultra-low cost for high-volume, simple coding and text tasks.",
      "features": [
        "Ultra-low cost",
        "Fast inference",
        "1M token context"
      ],
      "bestFor": "High-volume tasks, batch processing, cost-optimized pipelines",
      "contextWindow": "1M tokens",
      "releaseDate": "2025-06",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "qwen-turbo",
      "name": "Qwen Turbo",
      "provider": "Qwen",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.08,
        "output": 0.24
      },
      "description": "Fastest and cheapest Qwen model. Good for high-volume tasks.",
      "features": [
        "Fast inference",
        "Low cost"
      ],
      "bestFor": "High-volume tasks, simple coding",
      "contextWindow": "1M tokens",
      "releaseDate": "2024-01",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "mistral-nemo",
      "name": "Mistral Nemo",
      "provider": "Mistral",
      "category": [
        "budget",
        "coding"
      ],
      "pricing": {
        "input": 0.15,
        "output": 0.15
      },
      "description": "Compact 12B open-weight model co-developed with NVIDIA. Excellent coding performance at minimal cost.",
      "features": [
        "Open weights",
        "Self-hostable",
        "Efficient inference"
      ],
      "bestFor": "Self-hosted deployments, cost-sensitive coding, edge deployments",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-07",
      "url": "https://mistral.ai/technology/#pricing"
    },
    {
      "id": "gemini-1-5-flash",
      "name": "Gemini 1.5 Flash",
      "provider": "Google",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.075,
        "output": 0.3
      },
      "description": "Cheapest Gemini model. Good for high-volume, simple tasks.",
      "features": [
        "Fast",
        "Low cost"
      ],
      "bestFor": "High-volume simple tasks, cost optimization",
      "contextWindow": "1M tokens",
      "releaseDate": "2024-05",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "mistral-small",
      "name": "Mistral Small 3",
      "provider": "Mistral",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.1,
        "output": 0.3
      },
      "description": "Mistral's cost-effective model. Very affordable for general-purpose tasks.",
      "features": [
        "Low cost",
        "Open weights",
        "Fast inference"
      ],
      "bestFor": "High-volume tasks, cost optimization",
      "contextWindow": "32K tokens",
      "releaseDate": "2025-01",
      "url": "https://mistral.ai/technology/#pricing"
    },
    {
      "id": "phi-4",
      "name": "Microsoft Phi-4",
      "provider": "Microsoft",
      "category": [
        "budget",
        "coding"
      ],
      "pricing": {
        "input": 0.1,
        "output": 0.3
      },
      "description": "Microsoft's compact 14B model with strong reasoning and coding capability. Excellent value for small-scale deployments.",
      "features": [
        "Compact size",
        "Strong reasoning for size",
        "Open weights"
      ],
      "bestFor": "Edge deployments, local inference, budget coding",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-01",
      "url": "https://azure.microsoft.com/en-us/products/phi"
    },
    {
      "id": "gemini-2-0-flash",
      "name": "Gemini 2.0 Flash",
      "provider": "Google",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.1,
        "output": 0.4
      },
      "description": "Cheapest Google model. Fast responses for simple coding tasks.",
      "features": [
        "Fastest Google model",
        "Lowest cost"
      ],
      "bestFor": "Simple tasks, fast responses, cost optimization",
      "contextWindow": "1M tokens",
      "releaseDate": "2024-12",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "gemma-3-27b",
      "name": "Gemma 3 27B",
      "provider": "Google",
      "category": [
        "budget",
        "coding"
      ],
      "pricing": {
        "input": 0.1,
        "output": 0.4
      },
      "description": "Google's open-weight 27B model. Budget-friendly with strong coding capability and Google's research backing.",
      "features": [
        "Open weights",
        "Self-hostable",
        "Multilingual"
      ],
      "bestFor": "Self-hosted deployments, budget coding, research",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-03",
      "url": "https://ai.google.dev/gemma"
    },
    {
      "id": "gemini-2-5-flash",
      "name": "Gemini 2.5 Flash",
      "provider": "Google",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 0.15,
        "output": 0.6
      },
      "description": "Fast and affordable Google model. Great for high-volume coding and processing.",
      "features": [
        "Fast inference",
        "Low cost",
        "Multimodal"
      ],
      "bestFor": "High-volume coding, data processing, fast responses",
      "contextWindow": "1M tokens",
      "releaseDate": "2025-04",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "qwen-3-turbo",
      "name": "Qwen 3 Turbo",
      "provider": "Qwen",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.15,
        "output": 0.6
      },
      "description": "Fast and affordable Qwen 3 generation model. Good for high-volume tasks with improved quality over Qwen Turbo.",
      "features": [
        "Fast inference",
        "Low cost",
        "Improved over v1"
      ],
      "bestFor": "High-volume tasks, simple coding, batch processing",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-05",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "deepseek-jiuge",
      "name": "DeepSeek Jiuge",
      "provider": "DeepSeek",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 0.15,
        "output": 0.6
      },
      "description": "Ultra-budget DeepSeek model for high-volume tasks. Competitive with Gemini Flash pricing.",
      "features": [
        "Ultra-low cost",
        "Open weights",
        "Fast inference"
      ],
      "bestFor": "High-volume tasks, batch processing, cost-optimized pipelines",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-05",
      "url": "https://platform.deepseek.com/api-docs/pricing"
    },
    {
      "id": "gpt-4o-mini",
      "name": "GPT-4o mini",
      "provider": "OpenAI",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.15,
        "output": 0.6,
        "cacheRead": 0.075
      },
      "description": "Affordable small model. Fast and cost-effective for high-volume coding tasks.",
      "features": [
        "Low cost",
        "Fast inference",
        "Function calling"
      ],
      "bestFor": "High-volume tasks, simple coding, cost-sensitive projects",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-07",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "grok-3-mini",
      "name": "Grok 3 Mini",
      "provider": "xAI",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.3,
        "output": 0.5
      },
      "description": "Cost-effective xAI model for high-volume tasks. Good balance of capability and affordability.",
      "features": [
        "Low cost",
        "Fast inference",
        "Real-time knowledge"
      ],
      "bestFor": "High-volume tasks, simple coding, cost-sensitive projects",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-02",
      "url": "https://docs.x.ai/docs/pricing"
    },
    {
      "id": "reka-flash",
      "name": "Reka Flash",
      "provider": "Reka",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.2,
        "output": 0.8
      },
      "description": "Reka's fast multimodal model. Compact and efficient for high-volume tasks with vision capability.",
      "features": [
        "Multimodal",
        "Fast inference",
        "Low cost"
      ],
      "bestFor": "High-volume tasks, vision + text, cost optimization",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-09",
      "url": "https://www.reka.ai"
    },
    {
      "id": "mistral-codestral",
      "name": "Codestral",
      "provider": "Mistral",
      "category": [
        "coding"
      ],
      "pricing": {
        "input": 0.3,
        "output": 0.9
      },
      "description": "Mistral's dedicated coding model. Open-weight and highly optimized for code generation and completion.",
      "features": [
        "Code-specialized",
        "Open weights",
        "Fill-in-the-middle support"
      ],
      "bestFor": "Code completion, code generation, IDE integration",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-06",
      "url": "https://mistral.ai/technology/#pricing"
    },
    {
      "id": "llama-3-3-70b",
      "name": "Llama 3.3 70B",
      "provider": "Meta",
      "category": [
        "general-purpose",
        "coding",
        "budget"
      ],
      "pricing": {
        "input": 0.25,
        "output": 1
      },
      "description": "Meta's open-weight 70B model. Strong coding and general capability, widely supported across AI platforms.",
      "features": [
        "Open weights",
        "Self-hostable",
        "Wide ecosystem support"
      ],
      "bestFor": "Self-hosted deployments, cost-effective coding, custom fine-tuning",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-12",
      "url": "https://ai.meta.com/blog/llama-3-3/"
    },
    {
      "id": "deepseek-chat",
      "name": "DeepSeek Chat V3",
      "provider": "DeepSeek",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 0.27,
        "output": 1.1
      },
      "description": "Very affordable general-purpose model from DeepSeek. Strong coding and reasoning at low cost.",
      "features": [
        "Low cost",
        "Strong coding",
        "Open weights"
      ],
      "bestFor": "Cost-sensitive projects, coding, general tasks",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-12",
      "url": "https://platform.deepseek.com/api-docs/pricing"
    },
    {
      "id": "deepseek-coder",
      "name": "DeepSeek Coder V2",
      "provider": "DeepSeek",
      "category": [
        "coding",
        "budget"
      ],
      "pricing": {
        "input": 0.27,
        "output": 1.1
      },
      "description": "DeepSeek's coding-specialized model. Open-source and very affordable.",
      "features": [
        "Code-specialized",
        "Open weights",
        "Low cost"
      ],
      "bestFor": "Code generation, code review, debugging",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-06",
      "url": "https://platform.deepseek.com/api-docs/pricing"
    },
    {
      "id": "deepseek-coder-v3",
      "name": "DeepSeek Coder V3",
      "provider": "DeepSeek",
      "category": [
        "coding",
        "budget"
      ],
      "pricing": {
        "input": 0.27,
        "output": 1.1
      },
      "description": "Latest generation DeepSeek coding model. Improved code understanding and generation over V2.",
      "features": [
        "Code-specialized v3 architecture",
        "Open weights",
        "Multi-language support"
      ],
      "bestFor": "Code generation, refactoring, multi-language development",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-04",
      "url": "https://platform.deepseek.com/api-docs/pricing"
    },
    {
      "id": "claude-3-haiku",
      "name": "Claude 3 Haiku",
      "provider": "Anthropic",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.25,
        "output": 1.25
      },
      "description": "Cheapest Claude model. Fast responses for simple tasks and basic coding.",
      "features": [
        "Fastest Claude model",
        "Lowest cost"
      ],
      "bestFor": "Simple queries, fast responses, cost-sensitive tasks",
      "contextWindow": "200K tokens",
      "releaseDate": "2024-03",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "qwen-coder-turbo",
      "name": "Qwen Coder Turbo",
      "provider": "Qwen",
      "category": [
        "coding",
        "budget"
      ],
      "pricing": {
        "input": 0.25,
        "output": 1.25
      },
      "description": "Fast coding model from Qwen. Good price-performance for code generation.",
      "features": [
        "Fast coding",
        "Multi-language support",
        "Low cost"
      ],
      "bestFor": "High-volume coding, code review",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-06",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "deepseek-v3-2",
      "name": "DeepSeek V3.2",
      "provider": "DeepSeek",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 0.3,
        "output": 1.2
      },
      "description": "Updated V3 model with improved general reasoning and multilingual capability. Strong value proposition.",
      "features": [
        "Updated architecture",
        "Multilingual",
        "Open weights"
      ],
      "bestFor": "General tasks, bilingual coding, cost-effective workflows",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-07",
      "url": "https://platform.deepseek.com/api-docs/pricing"
    },
    {
      "id": "qwen-coder-turbo-v2",
      "name": "Qwen Coder Turbo V2",
      "provider": "Qwen",
      "category": [
        "coding",
        "budget"
      ],
      "pricing": {
        "input": 0.3,
        "output": 1.2
      },
      "description": "Updated Qwen Coder Turbo with improved code generation quality. Strong value for budget coding.",
      "features": [
        "Code-optimized v2",
        "Multi-language support",
        "Fast inference"
      ],
      "bestFor": "Code generation, debugging, cost-effective development",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-06",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "qwen-plus",
      "name": "Qwen Plus",
      "provider": "Qwen",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 0.4,
        "output": 1.2
      },
      "description": "Balanced Qwen model for general tasks. Good price-performance ratio.",
      "features": [
        "Balanced performance",
        "Multilingual"
      ],
      "bestFor": "General-purpose tasks, bilingual coding",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-01",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "gpt-4.1-mini",
      "name": "GPT-4.1 mini",
      "provider": "OpenAI",
      "category": [
        "budget",
        "coding"
      ],
      "pricing": {
        "input": 0.4,
        "output": 1.6
      },
      "description": "Cost-optimized GPT-4.1 variant. Strong coding capability at budget pricing, replacing GPT-4o mini for many use cases.",
      "features": [
        "Budget pricing",
        "Improved over GPT-4o mini",
        "Function calling"
      ],
      "bestFor": "High-volume coding, cost-sensitive projects, automation",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-04",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "gpt-3.5-turbo",
      "name": "GPT-3.5 Turbo",
      "provider": "OpenAI",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.5,
        "output": 1.5
      },
      "description": "Budget model for simple tasks. Being phased out but still widely used.",
      "features": [
        "Low cost",
        "Fast responses"
      ],
      "bestFor": "Simple chatbots, basic text generation",
      "contextWindow": "16K tokens",
      "releaseDate": "2023-03",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "mistral-medium",
      "name": "Mistral Medium",
      "provider": "Mistral",
      "category": [
        "general-purpose",
        "coding"
      ],
      "pricing": {
        "input": 0.4,
        "output": 2
      },
      "description": "Mid-tier Mistral model between Small and Large. Strong coding capability at a moderate price point.",
      "features": [
        "Balanced performance",
        "Open weights",
        "Strong coding"
      ],
      "bestFor": "Day-to-day coding, mid-complexity tasks, cost-performance balance",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-03",
      "url": "https://mistral.ai/technology/#pricing"
    },
    {
      "id": "qwen-3-coder",
      "name": "Qwen 3 Coder",
      "provider": "Qwen",
      "category": [
        "coding"
      ],
      "pricing": {
        "input": 0.5,
        "output": 2
      },
      "description": "Latest Qwen coding-specialized model. Strong performance on HumanEval and competitive programming benchmarks.",
      "features": [
        "Code-optimized architecture",
        "Strong Chinese support",
        "Multi-language"
      ],
      "bestFor": "Code generation, competitive programming, Chinese/English bilingual coding",
      "contextWindow": "256K tokens",
      "releaseDate": "2025-08",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "deepseek-reasoner",
      "name": "DeepSeek Reasoner (R1)",
      "provider": "DeepSeek",
      "category": [
        "reasoning",
        "budget"
      ],
      "pricing": {
        "input": 0.55,
        "output": 2.19
      },
      "description": "DeepSeek's reasoning model. Comparable to OpenAI's o1 but at much lower cost.",
      "features": [
        "Chain of thought reasoning",
        "Math optimization",
        "Open weights"
      ],
      "bestFor": "Complex reasoning, math, advanced coding",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-01",
      "url": "https://platform.deepseek.com/api-docs/pricing"
    },
    {
      "id": "qwen-coder-plus",
      "name": "Qwen Coder Plus",
      "provider": "Qwen",
      "category": [
        "coding"
      ],
      "pricing": {
        "input": 0.8,
        "output": 4
      },
      "description": "Qwen model specifically optimized for coding tasks.",
      "features": [
        "Code optimization",
        "Multi-language support"
      ],
      "bestFor": "Code generation, debugging, refactoring",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-06",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "claude-3-5-haiku",
      "name": "Claude 3.5 Haiku",
      "provider": "Anthropic",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 0.8,
        "output": 4,
        "cacheRead": 0.08,
        "cacheCreate": 1
      },
      "description": "Fast, cost-effective model for high-volume tasks. Great for code review and simple queries.",
      "features": [
        "Prompt caching",
        "Fast inference",
        "Low cost"
      ],
      "bestFor": "Code review, high-volume tasks, simple queries",
      "contextWindow": "200K tokens",
      "releaseDate": "2024-10",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "claude-4-haiku",
      "name": "Claude 4 Haiku",
      "provider": "Anthropic",
      "category": [
        "budget",
        "general-purpose"
      ],
      "pricing": {
        "input": 0.8,
        "output": 4,
        "cacheRead": 0.08,
        "cacheCreate": 1
      },
      "description": "Updated Haiku model with improved reasoning over Claude 3.5 Haiku. Fast and affordable for high-volume tasks.",
      "features": [
        "Prompt caching",
        "Fast inference",
        "Improved reasoning over 3.5 Haiku"
      ],
      "bestFor": "High-volume tasks, simple queries, cost-sensitive workflows",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-08",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "o1-mini",
      "name": "OpenAI o1-mini",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "budget"
      ],
      "pricing": {
        "input": 1.1,
        "output": 4.4
      },
      "description": "Cost-effective reasoning model. Good for coding tasks that require logical reasoning.",
      "features": [
        "Reasoning capability",
        "Lower cost than full o1"
      ],
      "bestFor": "Coding logic, debugging, algorithm design",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-09",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "o3-mini",
      "name": "OpenAI o3-mini",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "budget",
        "coding"
      ],
      "pricing": {
        "input": 1.1,
        "output": 4.4
      },
      "description": "Affordable reasoning model for coding tasks. Best price-performance for algorithm-heavy work.",
      "features": [
        "Reasoning capability",
        "Affordable",
        "Coding optimized"
      ],
      "bestFor": "Algorithm design, coding challenges, debugging",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-01",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "o4-mini",
      "name": "OpenAI o4-mini",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "budget"
      ],
      "pricing": {
        "input": 1.1,
        "output": 4.4
      },
      "description": "Updated mini reasoning model. Similar pricing to o3-mini with updated capabilities.",
      "features": [
        "Updated reasoning",
        "Efficient inference"
      ],
      "bestFor": "General reasoning, coding tasks",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-04",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "gemini-1-5-pro",
      "name": "Gemini 1.5 Pro",
      "provider": "Google",
      "category": [
        "general-purpose",
        "enterprise"
      ],
      "pricing": {
        "input": 1.25,
        "output": 5
      },
      "description": "Previous generation Google pro model. Good for general tasks.",
      "features": [
        "Long context",
        "Multimodal"
      ],
      "bestFor": "General-purpose tasks, document analysis",
      "contextWindow": "1M tokens",
      "releaseDate": "2024-04",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "claude-sonnet-4-lite",
      "name": "Claude Sonnet 4 Lite",
      "provider": "Anthropic",
      "category": [
        "general-purpose",
        "budget"
      ],
      "pricing": {
        "input": 1,
        "output": 5,
        "cacheRead": 0.1,
        "cacheCreate": 1.25
      },
      "description": "Lighter version of Claude Sonnet 4. Good balance of quality and cost for day-to-day coding.",
      "features": [
        "Prompt caching",
        "Faster inference",
        "Cost-effective"
      ],
      "bestFor": "Day-to-day coding, documentation, cost-conscious teams",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-07",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "qwen-max",
      "name": "Qwen Max",
      "provider": "Qwen",
      "category": [
        "general-purpose",
        "enterprise"
      ],
      "pricing": {
        "input": 1.6,
        "output": 6.4
      },
      "description": "Qwen's most powerful model. Strong reasoning and coding capabilities.",
      "features": [
        "High reasoning capability",
        "Multilingual"
      ],
      "bestFor": "Complex reasoning, advanced coding",
      "contextWindow": "32K tokens",
      "releaseDate": "2024-03",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "mistral-large-2",
      "name": "Mistral Large 2",
      "provider": "Mistral",
      "category": [
        "general-purpose",
        "enterprise"
      ],
      "pricing": {
        "input": 2,
        "output": 6
      },
      "description": "Mistral's flagship model. Strong multilingual and coding capability.",
      "features": [
        "Multilingual",
        "Strong coding",
        "Long context"
      ],
      "bestFor": "Enterprise coding, multilingual tasks",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-07",
      "url": "https://mistral.ai/technology/#pricing"
    },
    {
      "id": "mistral-large-3",
      "name": "Mistral Large 3",
      "provider": "Mistral",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 2,
        "output": 6
      },
      "description": "Latest Mistral flagship model. Improved coding and multilingual capability over Large 2.",
      "features": [
        "Multilingual",
        "Improved coding",
        "Open weights available"
      ],
      "bestFor": "Enterprise coding, European data sovereignty, multilingual tasks",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-06",
      "url": "https://mistral.ai/technology/#pricing"
    },
    {
      "id": "grok-code",
      "name": "Grok Code",
      "provider": "xAI",
      "category": [
        "coding"
      ],
      "pricing": {
        "input": 1.5,
        "output": 7.5
      },
      "description": "xAI's coding-specialized model. Optimized for code generation, debugging, and software engineering tasks.",
      "features": [
        "Code-optimized",
        "Multi-language support",
        "Real-time docs access"
      ],
      "bestFor": "Code generation, debugging, software development workflows",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-05",
      "url": "https://docs.x.ai/docs/pricing"
    },
    {
      "id": "gpt-4.1",
      "name": "GPT-4.1",
      "provider": "OpenAI",
      "category": [
        "general-purpose",
        "coding"
      ],
      "pricing": {
        "input": 2,
        "output": 8
      },
      "description": "Updated GPT-4 generation with improved instruction following and reduced hallucination. Better coding accuracy than GPT-4o.",
      "features": [
        "Improved accuracy",
        "Reduced hallucination",
        "Better instruction following"
      ],
      "bestFor": "Production coding, API development, complex instructions",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-04",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "gemini-2-5-pro",
      "name": "Gemini 2.5 Pro",
      "provider": "Google",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 1.25,
        "output": 10
      },
      "description": "Google's most capable model. Strong coding, multimodal understanding, and very competitive pricing.",
      "features": [
        "Multimodal",
        "1M+ context window",
        "Competitive pricing"
      ],
      "bestFor": "Long document analysis, coding, multimodal tasks",
      "contextWindow": "1M tokens",
      "releaseDate": "2025-04",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "gemini-2-0-pro",
      "name": "Gemini 2.0 Pro",
      "provider": "Google",
      "category": [
        "general-purpose",
        "enterprise"
      ],
      "pricing": {
        "input": 2.5,
        "output": 10
      },
      "description": "Mid-tier Gemini 2.0 model. Better quality than Flash at a competitive price point for enterprise coding tasks.",
      "features": [
        "Multimodal",
        "Strong reasoning",
        "Long context"
      ],
      "bestFor": "Enterprise coding, complex analysis, multimodal understanding",
      "contextWindow": "1M tokens",
      "releaseDate": "2024-12",
      "url": "https://ai.google.dev/pricing"
    },
    {
      "id": "gpt-4o",
      "name": "GPT-4o",
      "provider": "OpenAI",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 2.5,
        "output": 10,
        "cacheRead": 1.25
      },
      "description": "OpenAI's flagship multimodal model. Strong coding and reasoning at competitive pricing.",
      "features": [
        "Multimodal",
        "Function calling",
        "Vision"
      ],
      "bestFor": "General coding, multimodal tasks, chatbots",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-05",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "claude-3-sonnet",
      "name": "Claude 3 Sonnet",
      "provider": "Anthropic",
      "category": [
        "general-purpose"
      ],
      "pricing": {
        "input": 3,
        "output": 15
      },
      "description": "First generation Sonnet. Balanced performance for general tasks.",
      "features": [
        "Balanced performance",
        "Fast inference"
      ],
      "bestFor": "General-purpose coding and reasoning",
      "contextWindow": "200K tokens",
      "releaseDate": "2024-02",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "grok-3",
      "name": "Grok 3",
      "provider": "xAI",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 3,
        "output": 15
      },
      "description": "xAI's flagship model. Strong general-purpose capability with real-time knowledge access through X platform integration.",
      "features": [
        "Real-time knowledge",
        "Strong reasoning",
        "Large context window"
      ],
      "bestFor": "General coding, research tasks, current-event-aware applications",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-02",
      "url": "https://docs.x.ai/docs/pricing"
    },
    {
      "id": "claude-sonnet-4",
      "name": "Claude Sonnet 4",
      "provider": "Anthropic",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 3,
        "output": 15,
        "cacheRead": 0.3,
        "cacheCreate": 3.75
      },
      "description": "Anthropic's balanced model for coding and general tasks. Best price-performance ratio in the Claude family.",
      "features": [
        "Prompt caching",
        "Extended thinking",
        "Large context window"
      ],
      "bestFor": "Day-to-day coding, code review, documentation",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-05",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "claude-3-5-sonnet",
      "name": "Claude 3.5 Sonnet",
      "provider": "Anthropic",
      "category": [
        "general-purpose",
        "coding"
      ],
      "pricing": {
        "input": 3,
        "output": 15,
        "cacheRead": 0.3,
        "cacheCreate": 3.75
      },
      "description": "Previous generation Sonnet. Still excellent for coding tasks at the same price point.",
      "features": [
        "Prompt caching",
        "Computer use",
        "Artifact generation"
      ],
      "bestFor": "Coding assistants, web development, data analysis",
      "contextWindow": "200K tokens",
      "releaseDate": "2024-10",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "qwen3-6-plus",
      "name": "Qwen 3.6 Plus",
      "provider": "Qwen",
      "category": [
        "general-purpose",
        "coding"
      ],
      "pricing": {
        "input": 3,
        "output": 15,
        "cacheRead": 0.3,
        "cacheCreate": 3.75
      },
      "description": "Qwen's latest general-purpose model. Competitive with Claude Sonnet pricing.",
      "features": [
        "Strong Chinese language support",
        "Coding capability",
        "Prompt caching"
      ],
      "bestFor": "Chinese/English bilingual coding, general tasks",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-06",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "qwen-3-max",
      "name": "Qwen 3 Max",
      "provider": "Qwen",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 5,
        "output": 20
      },
      "description": "Flagship Qwen 3 model. Top-tier reasoning and coding, competitive with Claude Opus and GPT-4.1.",
      "features": [
        "Highest reasoning in Qwen family",
        "Multilingual",
        "Strong Chinese support"
      ],
      "bestFor": "Complex coding, advanced reasoning, bilingual enterprise applications",
      "contextWindow": "256K tokens",
      "releaseDate": "2025-09",
      "url": "https://help.aliyun.com/zh/model-studio/developer-reference/price"
    },
    {
      "id": "grok-3-vision",
      "name": "Grok 3 Vision",
      "provider": "xAI",
      "category": [
        "general-purpose",
        "enterprise"
      ],
      "pricing": {
        "input": 5,
        "output": 20
      },
      "description": "xAI's multimodal vision model. Combines Grok 3's reasoning with image and diagram understanding.",
      "features": [
        "Multimodal vision",
        "Diagram understanding",
        "Real-time knowledge"
      ],
      "bestFor": "UI/UX analysis, diagram-to-code, visual debugging",
      "contextWindow": "128K tokens",
      "releaseDate": "2025-04",
      "url": "https://docs.x.ai/docs/pricing"
    },
    {
      "id": "grok-4",
      "name": "Grok 4",
      "provider": "xAI",
      "category": [
        "reasoning",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 5,
        "output": 25
      },
      "description": "Next-generation xAI model with enhanced reasoning and coding capability. Competes with Claude Opus and o3 Pro tier.",
      "features": [
        "Enhanced reasoning",
        "Advanced coding",
        "Real-time knowledge"
      ],
      "bestFor": "Complex reasoning, advanced coding, enterprise applications",
      "contextWindow": "256K tokens",
      "releaseDate": "2025-09",
      "url": "https://docs.x.ai/docs/pricing"
    },
    {
      "id": "gpt-4-turbo",
      "name": "GPT-4 Turbo",
      "provider": "OpenAI",
      "category": [
        "general-purpose",
        "enterprise"
      ],
      "pricing": {
        "input": 10,
        "output": 30
      },
      "description": "Previous generation high-performance model. Good for complex reasoning tasks.",
      "features": [
        "Knowledge cutoff 2023-12",
        "JSON mode",
        "Function calling"
      ],
      "bestFor": "Complex reasoning, data extraction, analysis",
      "contextWindow": "128K tokens",
      "releaseDate": "2024-04",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "o3",
      "name": "OpenAI o3",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "coding"
      ],
      "pricing": {
        "input": 10,
        "output": 40
      },
      "description": "Next generation reasoning model. Improved coding and math over o1.",
      "features": [
        "Advanced reasoning",
        "Improved coding"
      ],
      "bestFor": "Advanced coding, complex problem-solving",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-02",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "o1",
      "name": "OpenAI o1",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "coding"
      ],
      "pricing": {
        "input": 15,
        "output": 60
      },
      "description": "Reasoning model optimized for complex problem-solving. Excels at math, science, and advanced coding.",
      "features": [
        "Chain of thought reasoning",
        "Math optimization",
        "Science tasks"
      ],
      "bestFor": "Complex math, advanced coding, scientific reasoning",
      "contextWindow": "200K tokens",
      "releaseDate": "2024-09",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "claude-3-opus",
      "name": "Claude 3 Opus",
      "provider": "Anthropic",
      "category": [
        "general-purpose"
      ],
      "pricing": {
        "input": 15,
        "output": 75
      },
      "description": "First generation Opus. Highest reasoning capability in the Claude 3 family.",
      "features": [
        "High reasoning capability",
        "Long context"
      ],
      "bestFor": "Deep analysis, complex coding tasks",
      "contextWindow": "200K tokens",
      "releaseDate": "2024-02",
      "url": "https://www.anthropic.com/pricing"
    },
    {
      "id": "gpt-4",
      "name": "GPT-4",
      "provider": "OpenAI",
      "category": [
        "general-purpose"
      ],
      "pricing": {
        "input": 30,
        "output": 60
      },
      "description": "Original GPT-4. Highest input price of any OpenAI model, largely superseded by newer options.",
      "features": [
        "Original GPT-4 capability"
      ],
      "bestFor": "Legacy applications requiring GPT-4 specifically",
      "contextWindow": "8K tokens",
      "releaseDate": "2023-03",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "o1-pro",
      "name": "OpenAI o1 Pro",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "enterprise"
      ],
      "pricing": {
        "input": 20,
        "output": 80
      },
      "description": "Premium reasoning model with extended compute. Best-in-class for complex math, science, and advanced coding challenges.",
      "features": [
        "Extended chain of thought",
        "Premium compute allocation",
        "Highest reasoning accuracy"
      ],
      "bestFor": "Research-grade problem solving, competition math, complex system design",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-03",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "o3-pro",
      "name": "OpenAI o3 Pro",
      "provider": "OpenAI",
      "category": [
        "reasoning",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 20,
        "output": 80
      },
      "description": "Top-tier reasoning model combining o3's coding strength with extended compute. The most powerful OpenAI model for reasoning-heavy coding.",
      "features": [
        "Extended chain of thought",
        "Advanced coding reasoning",
        "Premium compute"
      ],
      "bestFor": "Complex algorithm design, system architecture, research coding",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-06",
      "url": "https://openai.com/api/pricing/"
    },
    {
      "id": "claude-opus-4",
      "name": "Claude Opus 4",
      "provider": "Anthropic",
      "category": [
        "general-purpose",
        "coding",
        "enterprise"
      ],
      "pricing": {
        "input": 15,
        "output": 75,
        "cacheRead": 1.5,
        "cacheCreate": 18.75
      },
      "description": "Anthropic's most powerful model. Best for complex reasoning and challenging coding tasks.",
      "features": [
        "Prompt caching",
        "Extended thinking",
        "Highest accuracy"
      ],
      "bestFor": "Complex architecture decisions, debugging hard bugs, research",
      "contextWindow": "200K tokens",
      "releaseDate": "2025-05",
      "url": "https://www.anthropic.com/pricing"
    }
  ],
  "lastUpdated": "2026-04-17T19:44:59.105Z",
  "totalTools": 61
}