{"ok":true,"snapshot":{"date":"2026-05-04","capturedAt":"2026-05-04T22:25:19.472Z","total_models":372,"models":[{"id":"~anthropic/claude-haiku-latest","name":"Anthropic Claude Haiku Latest","description":"This model always redirects to the latest model in the Anthropic Claude Haiku family.","created":1777318492,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":0.000001,"completion":0.000005,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"~anthropic/claude-opus-latest","name":"Anthropic: Claude Opus Latest","description":"This model always redirects to the latest model in the Claude Opus family.","created":1776795361,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":0.000005,"completion":0.000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","tool_choice","tools","verbosity"]},{"id":"~anthropic/claude-sonnet-latest","name":"Anthropic Claude Sonnet Latest","description":"This model always redirects to the latest model in the Anthropic Claude Sonnet family.","created":1777318368,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p","verbosity"]},{"id":"~google/gemini-flash-latest","name":"Google Gemini Flash Latest","description":"This model always redirects to the latest model in the Google Gemini Flash family.","created":1777318398,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":5e-7,"completion":0.000003,"image":5e-7,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"~google/gemini-pro-latest","name":"Google Gemini Pro Latest","description":"This model always redirects to the latest model in the Google Gemini Pro family.","created":1777318451,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":0.000002,"completion":0.000012,"image":0.000002,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"~moonshotai/kimi-latest","name":"MoonshotAI Kimi Latest","description":"This model always redirects to the latest model in the MoonshotAI Kimi family.","created":1777318428,"context_length":262142,"modality":"text+image->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":7.4e-7,"completion":0.00000349,"image":null,"request":null},"top_provider":{"max_completion_tokens":262142,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"~openai/gpt-latest","name":"OpenAI GPT Latest","description":"This model always redirects to the latest model in the OpenAI GPT family.","created":1777318334,"context_length":1050000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":0.000005,"completion":0.00003,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"~openai/gpt-mini-latest","name":"OpenAI GPT Mini Latest","description":"This model always redirects to the latest model in the OpenAI GPT Mini family.","created":1777318471,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":7.5e-7,"completion":0.0000045,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"ai21/jamba-large-1.7","name":"AI21: Jamba Large 1.7","description":"Jamba Large 1.7 is the latest model in the Jamba open family, offering improvements in grounding, instruction-following, and overall efficiency. Built on a hybrid SSM-Transformer architecture with a 256K context...","created":1754669020,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000002,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["max_tokens","response_format","stop","temperature","tool_choice","tools","top_p"]},{"id":"aion-labs/aion-1.0","name":"AionLabs: Aion-1.0","description":"Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree...","created":1738697557,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000004,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","top_p"]},{"id":"aion-labs/aion-1.0-mini","name":"AionLabs: Aion-1.0-Mini","description":"Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. It is a modified variant...","created":1738697107,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":7e-7,"completion":0.0000014,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","top_p"]},{"id":"aion-labs/aion-2.0","name":"AionLabs: Aion-2.0","description":"Aion-2.0 is a variant of DeepSeek V3.2 optimized for immersive roleplaying and storytelling. It is particularly strong at introducing tension, crises, and conflict into stories, making narratives feel more engaging....","created":1771881306,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":8e-7,"completion":0.0000016,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","top_p"]},{"id":"aion-labs/aion-rp-llama-3.1-8b","name":"AionLabs: Aion-RP 1.0 (8B)","description":"Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses. It is a fine-tuned base model...","created":1738696718,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":8e-7,"completion":0.0000016,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","temperature","top_p"]},{"id":"alfredpros/codellama-7b-instruct-solidity","name":"AlfredPros: CodeLLaMa 7B Instruct Solidity","description":"A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library.","created":1744641874,"context_length":4096,"modality":"text->text","instruct_type":"alpaca","tokenizer":"Other","pricing":{"prompt":8e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"alibaba/tongyi-deepresearch-30b-a3b","name":"Tongyi DeepResearch 30B A3B","description":"Tongyi DeepResearch is an agentic large language model developed by Tongyi Lab, with 30 billion total parameters activating only 3 billion per token. It's optimized for long-horizon, deep information-seeking tasks...","created":1758210804,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":9e-8,"completion":4.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"allenai/olmo-3-32b-think","name":"AllenAI: Olmo 3 32B Think","description":"Olmo 3 32B Think is a large-scale, 32-billion-parameter model purpose-built for deep reasoning, complex logic chains and advanced instruction-following scenarios. Its capacity enables strong performance on demanding evaluation tasks and...","created":1763758276,"context_length":65536,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.5e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"allenai/olmo-3.1-32b-instruct","name":"AllenAI: Olmo 3.1 32B Instruct","description":"Olmo 3.1 32B Instruct is a large-scale, 32-billion-parameter instruction-tuned language model engineered for high-performance conversational AI, multi-turn dialogue, and practical instruction following. As part of the Olmo 3.1 family, this...","created":1767728554,"context_length":65536,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"alpindale/goliath-120b","name":"Goliath 120B","description":"A large LLM created by combining two fine-tuned Llama 70B models into one 120B model. Combines Xwin and Euryale. Credits to - [@chargoddard](https://huggingface.co/chargoddard) for developing the framework used to merge...","created":1699574400,"context_length":6144,"modality":"text->text","instruct_type":"airoboros","tokenizer":"Llama2","pricing":{"prompt":0.00000375,"completion":0.0000075,"image":null,"request":null},"top_provider":{"max_completion_tokens":1024,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_a","top_k","top_logprobs","top_p"]},{"id":"amazon/nova-2-lite-v1","name":"Amazon: Nova 2 Lite","description":"Nova 2 Lite is a fast, cost-effective reasoning model for everyday workloads that can process text, images, and videos to generate text. Nova 2 Lite demonstrates standout capabilities in processing...","created":1764696672,"context_length":1000000,"modality":"text+image+file+video->text","instruct_type":null,"tokenizer":"Nova","pricing":{"prompt":3e-7,"completion":0.0000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"amazon/nova-lite-v1","name":"Amazon: Nova Lite 1.0","description":"Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focused on fast processing of image, video, and text inputs to generate text output. Amazon Nova Lite...","created":1733437363,"context_length":300000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Nova","pricing":{"prompt":6e-8,"completion":2.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":5120,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"]},{"id":"amazon/nova-micro-v1","name":"Amazon: Nova Micro 1.0","description":"Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost. With a context length...","created":1733437237,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Nova","pricing":{"prompt":3.5e-8,"completion":1.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":5120,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"]},{"id":"amazon/nova-premier-v1","name":"Amazon: Nova Premier 1.0","description":"Amazon Nova Premier is the most capable of Amazon’s multimodal models for complex reasoning tasks and for use as the best teacher for distilling custom models.","created":1761950332,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Nova","pricing":{"prompt":0.0000025,"completion":0.0000125,"image":null,"request":null},"top_provider":{"max_completion_tokens":32000,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"]},{"id":"amazon/nova-pro-v1","name":"Amazon: Nova Pro 1.0","description":"Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks. As of December...","created":1733436303,"context_length":300000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Nova","pricing":{"prompt":8e-7,"completion":0.0000032,"image":null,"request":null},"top_provider":{"max_completion_tokens":5120,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","tools","top_k","top_p"]},{"id":"anthracite-org/magnum-v4-72b","name":"Magnum v4 72B","description":"This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus).\n\nThe model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).","created":1729555200,"context_length":16384,"modality":"text->text","instruct_type":"chatml","tokenizer":"Qwen","pricing":{"prompt":0.000003,"completion":0.000005,"image":null,"request":null},"top_provider":{"max_completion_tokens":2048,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_a","top_k","top_logprobs","top_p"]},{"id":"anthropic/claude-3-haiku","name":"Anthropic: Claude 3 Haiku","description":"Claude 3 Haiku is Anthropic's fastest and most compact model for\nnear-instant responsiveness. Quick and accurate targeted performance.\n\nSee the launch announcement and benchmark results [here](https://www.anthropic.com/news/claude-3-haiku)\n\n#multimodal","created":1710288000,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":2.5e-7,"completion":0.00000125,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-3.5-haiku","name":"Anthropic: Claude 3.5 Haiku","description":"Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use. Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic...","created":1730678400,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":8e-7,"completion":0.000004,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-3.7-sonnet","name":"Anthropic: Claude 3.7 Sonnet","description":"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and...","created":1740422110,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-3.7-sonnet:thinking","name":"Anthropic: Claude 3.7 Sonnet (thinking)","description":"Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and...","created":1740422110,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_p"]},{"id":"anthropic/claude-haiku-4.5","name":"Anthropic: Claude Haiku 4.5","description":"Claude Haiku 4.5 is Anthropic’s fastest and most efficient model, delivering near-frontier intelligence at a fraction of the cost and latency of larger Claude models. Matching Claude Sonnet 4’s performance...","created":1760547638,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000001,"completion":0.000005,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-opus-4","name":"Anthropic: Claude Opus 4","description":"Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows. It sets new benchmarks in...","created":1747931245,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000015,"completion":0.000075,"image":null,"request":null},"top_provider":{"max_completion_tokens":32000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-opus-4.1","name":"Anthropic: Claude Opus 4.1","description":"Claude Opus 4.1 is an updated version of Anthropic’s flagship model, offering improved performance in coding, reasoning, and agentic tasks. It achieves 74.5% on SWE-bench Verified and shows notable gains...","created":1754411591,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000015,"completion":0.000075,"image":null,"request":null},"top_provider":{"max_completion_tokens":32000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-opus-4.5","name":"Anthropic: Claude Opus 4.5","description":"Claude Opus 4.5 is Anthropic’s frontier reasoning model optimized for complex software engineering, agentic workflows, and long-horizon computer use. It offers strong multimodal capabilities, competitive performance across real-world coding and...","created":1764010580,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000005,"completion":0.000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","verbosity"]},{"id":"anthropic/claude-opus-4.6","name":"Anthropic: Claude Opus 4.6","description":"Opus 4.6 is Anthropic’s strongest model for coding and long-running professional tasks. It is built for agents that operate across entire workflows rather than single prompts, making it especially effective...","created":1770219050,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000005,"completion":0.000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p","verbosity"]},{"id":"anthropic/claude-opus-4.6-fast","name":"Anthropic: Claude Opus 4.6 (Fast)","description":"Fast-mode variant of [Opus 4.6](/anthropic/claude-opus-4.6) - identical capabilities with higher output speed at premium 6x pricing.\n\nLearn more in Anthropic's docs: https://platform.claude.com/docs/en/build-with-claude/fast-mode","created":1775592472,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.00003,"completion":0.00015,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p","verbosity"]},{"id":"anthropic/claude-opus-4.7","name":"Anthropic: Claude Opus 4.7","description":"Opus 4.7 is the next generation of Anthropic's Opus family, built for long-running, asynchronous agents. Building on the coding and agentic strengths of Opus 4.6, it delivers stronger performance on...","created":1776351100,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000005,"completion":0.000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","tool_choice","tools","verbosity"]},{"id":"anthropic/claude-sonnet-4","name":"Anthropic: Claude Sonnet 4","description":"Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability. Achieving state-of-the-art performance on SWE-bench (72.7%),...","created":1747930371,"context_length":1000000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-sonnet-4.5","name":"Anthropic: Claude Sonnet 4.5","description":"Claude Sonnet 4.5 is Anthropic’s most advanced Sonnet model to date, optimized for real-world agents and coding workflows. It delivers state-of-the-art performance on coding benchmarks such as SWE-bench Verified, with...","created":1759161676,"context_length":1000000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"anthropic/claude-sonnet-4.6","name":"Anthropic: Claude Sonnet 4.6","description":"Sonnet 4.6 is Anthropic's most capable Sonnet-class model yet, with frontier performance across coding, agents, and professional work. It excels at iterative development, complex codebase navigation, end-to-end project management with...","created":1771342990,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Claude","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p","verbosity"]},{"id":"arcee-ai/coder-large","name":"Arcee AI: Coder Large","description":"Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora. It supports a 32k context window, enabling multi‑file...","created":1746478663,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":5e-7,"completion":8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"]},{"id":"arcee-ai/maestro-reasoning","name":"Arcee AI: Maestro Reasoning","description":"Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic. Compared to the earlier 7 B...","created":1746481269,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":9e-7,"completion":0.0000033,"image":null,"request":null},"top_provider":{"max_completion_tokens":32000,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"]},{"id":"arcee-ai/spotlight","name":"Arcee AI: Spotlight","description":"Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks. It offers a 32 k‑token context window, enabling rich multimodal...","created":1746481552,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.8e-7,"completion":1.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65537,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"]},{"id":"arcee-ai/trinity-large-preview","name":"Arcee AI: Trinity Large Preview","description":"Trinity-Large-Preview is a frontier-scale open-weight language model from Arcee, built as a 400B-parameter sparse Mixture-of-Experts with 13B active parameters per token using 4-of-256 expert routing. It excels in creative writing,...","created":1769552670,"context_length":131000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.5e-7,"completion":4.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["max_tokens","response_format","structured_outputs","temperature","tools","top_k","top_p"]},{"id":"arcee-ai/trinity-large-thinking","name":"Arcee AI: Trinity Large Thinking","description":"Trinity Large Thinking is a powerful open source reasoning model from the team at Arcee AI. It shows strong performance in PinchBench, agentic workloads, and reasoning tasks. Launch video: https://youtu.be/Gc82AXLa0Rg?si=4RLn6WBz33qT--B7","created":1775058318,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.2e-7,"completion":8.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"arcee-ai/trinity-mini","name":"Arcee AI: Trinity Mini","description":"Trinity Mini is a 26B-parameter (3B active) sparse mixture-of-experts language model featuring 128 experts with 8 active per token. Engineered for efficient reasoning over long contexts (131k) with robust function...","created":1764601720,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4.5e-8,"completion":1.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"arcee-ai/virtuoso-large","name":"Arcee AI: Virtuoso Large","description":"Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA. Unlike many 70 B peers, it retains the 128 k...","created":1746478885,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":7.5e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":64000,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"baidu/ernie-4.5-21b-a3b","name":"Baidu: ERNIE 4.5 21B A3B","description":"A sophisticated text-based Mixture-of-Experts (MoE) model featuring 21B total parameters with 3B activated per token, delivering exceptional multimodal understanding and generation through heterogeneous MoE structures and modality-isolated routing. Supporting an...","created":1755034167,"context_length":120000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":7e-8,"completion":2.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8000,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"baidu/ernie-4.5-21b-a3b-thinking","name":"Baidu: ERNIE 4.5 21B A3B Thinking","description":"ERNIE-4.5-21B-A3B-Thinking is Baidu's upgraded lightweight MoE model, refined to boost reasoning depth and quality for top-tier performance in logical puzzles, math, science, coding, text generation, and expert-level academic benchmarks.","created":1760048887,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":7e-8,"completion":2.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"baidu/ernie-4.5-300b-a47b","name":"Baidu: ERNIE 4.5 300B A47B","description":"ERNIE-4.5-300B-A47B is a 300B parameter Mixture-of-Experts (MoE) language model developed by Baidu as part of the ERNIE 4.5 series. It activates 47B parameters per token and supports text generation in...","created":1751300139,"context_length":123000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.8e-7,"completion":0.0000011,"image":null,"request":null},"top_provider":{"max_completion_tokens":12000,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"baidu/ernie-4.5-vl-28b-a3b","name":"Baidu: ERNIE 4.5 VL 28B A3B","description":"A powerful multimodal Mixture-of-Experts chat model featuring 28B total parameters with 3B activated per token, delivering exceptional text and vision understanding through its innovative heterogeneous MoE structure with modality-isolated routing....","created":1755032836,"context_length":30000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.4e-7,"completion":5.6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"baidu/ernie-4.5-vl-424b-a47b","name":"Baidu: ERNIE 4.5 VL 424B A47B","description":"ERNIE-4.5-VL-424B-A47B is a multimodal Mixture-of-Experts (MoE) model from Baidu’s ERNIE 4.5 series, featuring 424B total parameters with 47B active per token. It is trained jointly on text and image data...","created":1751300903,"context_length":123000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4.2e-7,"completion":0.00000125,"image":null,"request":null},"top_provider":{"max_completion_tokens":16000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"baidu/qianfan-ocr-fast:free","name":"Baidu: Qianfan-OCR-Fast (free)","description":"Qianfan-OCR-Fast is a domain-specific multimodal large model purpose-built for OCR. By leveraging specialized OCR training data while preserving versatile multimodal intelligence, it provides a powerful performance upgrade over Qianfan-OCR.","created":1776707472,"context_length":65536,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":28672,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_p"]},{"id":"bytedance-seed/seed-1.6","name":"ByteDance Seed: Seed 1.6","description":"Seed 1.6 is a general-purpose model released by the ByteDance Seed team. It incorporates multimodal capabilities and adaptive deep thinking with a 256K context window.","created":1766504997,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.5e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"bytedance-seed/seed-1.6-flash","name":"ByteDance Seed: Seed 1.6 Flash","description":"Seed 1.6 Flash is an ultra-fast multimodal deep thinking model by ByteDance Seed, supporting both text and visual understanding. It features a 256k context window and can generate outputs of...","created":1766505011,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":7.5e-8,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"bytedance-seed/seed-2.0-lite","name":"ByteDance Seed: Seed-2.0-Lite","description":"Seed-2.0-Lite is a versatile, cost‑efficient enterprise workhorse that delivers strong multimodal and agent capabilities while offering noticeably lower latency, making it a practical default choice for most production workloads across...","created":1773157231,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.5e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"bytedance-seed/seed-2.0-mini","name":"ByteDance Seed: Seed-2.0-Mini","description":"Seed-2.0-mini targets latency-sensitive, high-concurrency, and cost-sensitive scenarios, emphasizing fast response and flexible inference deployment. It delivers performance comparable to ByteDance-Seed-1.6, supports 256k context, four reasoning effort modes (minimal/low/medium/high), multimodal understanding,...","created":1772131107,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"bytedance/ui-tars-1.5-7b","name":"ByteDance: UI-TARS 7B","description":"UI-TARS-1.5 is a multimodal vision-language agent optimized for GUI-based environments, including desktop interfaces, web browsers, mobile systems, and games. Built by ByteDance, it builds upon the UI-TARS framework with reinforcement...","created":1753205056,"context_length":128000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1e-7,"completion":2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":2048,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"cognitivecomputations/dolphin-mistral-24b-venice-edition:free","name":"Venice: Uncensored (free)","description":"Venice Uncensored Dolphin Mistral 24B Venice Edition is a fine-tuned variant of Mistral-Small-24B-Instruct-2501, developed by dphn.ai in collaboration with Venice.ai. This model is designed as an “uncensored” instruct-tuned LLM, preserving...","created":1752094966,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"cohere/command-a","name":"Cohere: Command A","description":"Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases. Compared to other leading proprietary...","created":1741894342,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":true},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"cohere/command-r-08-2024","name":"Cohere: Command R (08-2024)","description":"command-r-08-2024 is an update of the [Command R](/models/cohere/command-r) with improved performance for multilingual retrieval-augmented generation (RAG) and tool use. More broadly, it is better at math, code and reasoning and...","created":1724976000,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Cohere","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":4000,"is_moderated":true},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"cohere/command-r-plus-08-2024","name":"Cohere: Command R+ (08-2024)","description":"command-r-plus-08-2024 is an update of the [Command R+](/models/cohere/command-r-plus) with roughly 50% higher throughput and 25% lower latencies as compared to the previous Command R+ version, while keeping the hardware footprint...","created":1724976000,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Cohere","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":4000,"is_moderated":true},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"cohere/command-r7b-12-2024","name":"Cohere: Command R7B (12-2024)","description":"Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning...","created":1734158152,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Cohere","pricing":{"prompt":3.75e-8,"completion":1.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":4000,"is_moderated":true},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"deepcogito/cogito-v2.1-671b","name":"Deep Cogito: Cogito v2.1 671B","description":"Cogito v2.1 671B MoE represents one of the strongest open models globally, matching performance of frontier closed and open models. This model is trained using self play with reinforcement learning...","created":1763071233,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.00000125,"completion":0.00000125,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"deepseek/deepseek-chat","name":"DeepSeek: DeepSeek V3","description":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations...","created":1735241320,"context_length":163840,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":3.2e-7,"completion":8.9e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-chat-v3-0324","name":"DeepSeek: DeepSeek V3 0324","description":"DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team. It succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well...","created":1742824755,"context_length":163840,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":2e-7,"completion":7.7e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-chat-v3.1","name":"DeepSeek: DeepSeek V3.1","description":"DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active) that supports both thinking and non-thinking modes via prompt templates. It extends the DeepSeek-V3 base with a two-phase long-context...","created":1755779628,"context_length":32768,"modality":"text->text","instruct_type":"deepseek-v3.1","tokenizer":"DeepSeek","pricing":{"prompt":1.5e-7,"completion":7.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":7168,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"deepseek/deepseek-r1","name":"DeepSeek: R1","description":"DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass....","created":1737381095,"context_length":64000,"modality":"text->text","instruct_type":"deepseek-r1","tokenizer":"DeepSeek","pricing":{"prompt":7e-7,"completion":0.0000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":16000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_completion_tokens","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-r1-0528","name":"DeepSeek: R1 0528","description":"May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active...","created":1748455170,"context_length":163840,"modality":"text->text","instruct_type":"deepseek-r1","tokenizer":"DeepSeek","pricing":{"prompt":5e-7,"completion":0.00000215,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-r1-distill-llama-70b","name":"DeepSeek: R1 Distill Llama 70B","description":"DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across...","created":1737663169,"context_length":131072,"modality":"text->text","instruct_type":"deepseek-r1","tokenizer":"Llama3","pricing":{"prompt":7e-7,"completion":8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"]},{"id":"deepseek/deepseek-r1-distill-qwen-32b","name":"DeepSeek: R1 Distill Qwen 32B","description":"DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new...","created":1738194830,"context_length":32768,"modality":"text->text","instruct_type":"deepseek-r1","tokenizer":"Qwen","pricing":{"prompt":2.9e-7,"completion":2.9e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logprobs","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"]},{"id":"deepseek/deepseek-v3.1-terminus","name":"DeepSeek: DeepSeek V3.1 Terminus","description":"DeepSeek-V3.1 Terminus is an update to [DeepSeek V3.1](/deepseek/deepseek-chat-v3.1) that maintains the model's original capabilities while addressing issues reported by users, including language consistency and agent capabilities, further optimizing the model's...","created":1758548275,"context_length":163840,"modality":"text->text","instruct_type":"deepseek-v3.1","tokenizer":"DeepSeek","pricing":{"prompt":2.7e-7,"completion":9.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-v3.2","name":"DeepSeek: DeepSeek V3.2","description":"DeepSeek-V3.2 is a large language model designed to harmonize high computational efficiency with strong reasoning and agentic tool-use performance. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism...","created":1764594642,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":2.52e-7,"completion":3.78e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-v3.2-exp","name":"DeepSeek: DeepSeek V3.2 Exp","description":"DeepSeek-V3.2-Exp is an experimental large language model released by DeepSeek as an intermediate step between V3.1 and future architectures. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism...","created":1759150481,"context_length":163840,"modality":"text->text","instruct_type":"deepseek-v3.1","tokenizer":"DeepSeek","pricing":{"prompt":2.7e-7,"completion":4.1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"deepseek/deepseek-v3.2-speciale","name":"DeepSeek: DeepSeek V3.2 Speciale","description":"DeepSeek-V3.2-Speciale is a high-compute variant of DeepSeek-V3.2 optimized for maximum reasoning and agentic performance. It builds on DeepSeek Sparse Attention (DSA) for efficient long-context processing, then scales post-training reinforcement learning...","created":1764594837,"context_length":163840,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":4e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":163840,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"deepseek/deepseek-v4-flash","name":"DeepSeek: DeepSeek V4 Flash","description":"DeepSeek V4 Flash is an efficiency-optimized Mixture-of-Experts model from DeepSeek with 284B total parameters and 13B activated parameters, supporting a 1M-token context window. It is designed for fast inference and...","created":1777000666,"context_length":1048576,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":1.4e-7,"completion":2.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":384000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"deepseek/deepseek-v4-pro","name":"DeepSeek: DeepSeek V4 Pro","description":"DeepSeek V4 Pro is a large-scale Mixture-of-Experts model from DeepSeek with 1.6T total parameters and 49B activated parameters, supporting a 1M-token context window. It is designed for advanced reasoning, coding,...","created":1777000679,"context_length":1048576,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":4.35e-7,"completion":8.7e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":384000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"essentialai/rnj-1-instruct","name":"EssentialAI: Rnj 1 Instruct","description":"Rnj-1 is an 8B-parameter, dense, open-weight model family developed by Essential AI and trained from scratch with a focus on programming, math, and scientific reasoning. The model demonstrates strong performance...","created":1765094847,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.5e-7,"completion":1.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"google/gemini-2.0-flash-001","name":"Google: Gemini 2.0 Flash","description":"Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It...","created":1738769413,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":1e-7,"completion":4e-7,"image":1e-7,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.0-flash-lite-001","name":"Google: Gemini 2.0 Flash Lite","description":"Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5),...","created":1740506212,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":7.5e-8,"completion":3e-7,"image":7.5e-8,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.5-flash","name":"Google: Gemini 2.5 Flash","description":"Gemini 2.5 Flash is Google's state-of-the-art workhorse model, specifically designed for advanced reasoning, coding, mathematics, and scientific tasks. It includes built-in \"thinking\" capabilities, enabling it to provide responses with greater...","created":1750172488,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":3e-7,"completion":0.0000025,"image":3e-7,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.5-flash-image","name":"Google: Nano Banana (Gemini 2.5 Flash Image)","description":"Gemini 2.5 Flash Image, a.k.a. \"Nano Banana,\" is now generally available. It is a state of the art image generation model with contextual understanding. It is capable of image generation,...","created":1759870431,"context_length":32768,"modality":"text+image->text+image","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":3e-7,"completion":0.0000025,"image":3e-7,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","stop","structured_outputs","temperature","top_p"]},{"id":"google/gemini-2.5-flash-lite","name":"Google: Gemini 2.5 Flash Lite","description":"Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance...","created":1753200276,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":1e-7,"completion":4e-7,"image":1e-7,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.5-flash-lite-preview-09-2025","name":"Google: Gemini 2.5 Flash Lite Preview 09-2025","description":"Gemini 2.5 Flash-Lite is a lightweight reasoning model in the Gemini 2.5 family, optimized for ultra-low latency and cost efficiency. It offers improved throughput, faster token generation, and better performance...","created":1758819686,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":1e-7,"completion":4e-7,"image":1e-7,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.5-pro","name":"Google: Gemini 2.5 Pro","description":"Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...","created":1750169544,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":0.00000125,"completion":0.00001,"image":0.00000125,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.5-pro-preview","name":"Google: Gemini 2.5 Pro Preview 06-05","description":"Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...","created":1749137257,"context_length":1048576,"modality":"text+image+file+audio->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":0.00000125,"completion":0.00001,"image":0.00000125,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-2.5-pro-preview-05-06","name":"Google: Gemini 2.5 Pro Preview 05-06","description":"Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...","created":1746578513,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":0.00000125,"completion":0.00001,"image":0.00000125,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-3-flash-preview","name":"Google: Gemini 3 Flash Preview","description":"Gemini 3 Flash Preview is a high speed, high value thinking model designed for agentic workflows, multi turn chat, and coding assistance. It delivers near Pro level reasoning and tool...","created":1765987078,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":5e-7,"completion":0.000003,"image":5e-7,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-3-pro-image-preview","name":"Google: Nano Banana Pro (Gemini 3 Pro Image Preview)","description":"Nano Banana Pro is Google’s most advanced image-generation and editing model, built on Gemini 3 Pro. It extends the original Nano Banana with significantly improved multimodal reasoning, real-world grounding, and...","created":1763653797,"context_length":65536,"modality":"text+image->text+image","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":0.000002,"completion":0.000012,"image":0.000002,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","top_p"]},{"id":"google/gemini-3.1-flash-image-preview","name":"Google: Nano Banana 2 (Gemini 3.1 Flash Image Preview)","description":"Gemini 3.1 Flash Image Preview, a.k.a. \"Nano Banana 2,\" is Google’s latest state of the art image generation and editing model, delivering Pro-level visual quality at Flash speed. It combines...","created":1772119558,"context_length":65536,"modality":"text+image->text+image","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":5e-7,"completion":0.000003,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","top_p"]},{"id":"google/gemini-3.1-flash-lite-preview","name":"Google: Gemini 3.1 Flash Lite Preview","description":"Gemini 3.1 Flash Lite Preview is Google's high-efficiency model optimized for high-volume use cases. It outperforms Gemini 2.5 Flash Lite on overall quality and approaches Gemini 2.5 Flash performance across...","created":1772512673,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":2.5e-7,"completion":0.0000015,"image":2.5e-7,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-3.1-pro-preview","name":"Google: Gemini 3.1 Pro Preview","description":"Gemini 3.1 Pro Preview is Google’s frontier reasoning model, delivering enhanced software engineering performance, improved agentic reliability, and more efficient token usage across complex workflows. Building on the multimodal foundation...","created":1771509627,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":0.000002,"completion":0.000012,"image":0.000002,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemini-3.1-pro-preview-customtools","name":"Google: Gemini 3.1 Pro Preview Custom Tools","description":"Gemini 3.1 Pro Preview Custom Tools is a variant of Gemini 3.1 Pro that improves tool selection behavior by preventing overuse of a general bash tool when more efficient third-party...","created":1772045923,"context_length":1048576,"modality":"text+image+file+audio+video->text","instruct_type":null,"tokenizer":"Gemini","pricing":{"prompt":0.000002,"completion":0.000012,"image":0.000002,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"google/gemma-2-27b-it","name":"Google: Gemma 2 27B","description":"Gemma 2 27B by Google is an open model built from the same research and technology used to create the [Gemini models](/models?q=gemini). Gemma models are well-suited for a variety of...","created":1720828800,"context_length":8192,"modality":"text->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":6.5e-7,"completion":6.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":2048,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_p"]},{"id":"google/gemma-3-12b-it","name":"Google: Gemma 3 12B","description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","created":1741902625,"context_length":131072,"modality":"text+image->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":4e-8,"completion":1.3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"google/gemma-3-12b-it:free","name":"Google: Gemma 3 12B (free)","description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","created":1741902625,"context_length":32768,"modality":"text+image->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","seed","stop","temperature","top_p"]},{"id":"google/gemma-3-27b-it","name":"Google: Gemma 3 27B","description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","created":1741756359,"context_length":131072,"modality":"text+image->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":8e-8,"completion":1.6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"google/gemma-3-27b-it:free","name":"Google: Gemma 3 27B (free)","description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","created":1741756359,"context_length":131072,"modality":"text+image->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","stop","temperature","top_p"]},{"id":"google/gemma-3-4b-it","name":"Google: Gemma 3 4B","description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","created":1741905510,"context_length":131072,"modality":"text+image->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":4e-8,"completion":8e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"google/gemma-3-4b-it:free","name":"Google: Gemma 3 4B (free)","description":"Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...","created":1741905510,"context_length":32768,"modality":"text+image->text","instruct_type":"gemma","tokenizer":"Gemini","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","stop","temperature","top_p"]},{"id":"google/gemma-3n-e2b-it:free","name":"Google: Gemma 3n 2B (free)","description":"Gemma 3n E2B IT is a multimodal, instruction-tuned model developed by Google DeepMind, designed to operate efficiently at an effective parameter size of 2B while leveraging a 6B architecture. Based...","created":1752074904,"context_length":8192,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":2048,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"]},{"id":"google/gemma-3n-e4b-it","name":"Google: Gemma 3n 4B","description":"Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks...","created":1747776824,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-8,"completion":1.2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"]},{"id":"google/gemma-3n-e4b-it:free","name":"Google: Gemma 3n 4B (free)","description":"Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks...","created":1747776824,"context_length":8192,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":2048,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"]},{"id":"google/gemma-4-26b-a4b-it","name":"Google: Gemma 4 26B A4B","description":"Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference — delivering near-31B quality at...","created":1775227989,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Gemma","pricing":{"prompt":6e-8,"completion":3.3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"google/gemma-4-26b-a4b-it:free","name":"Google: Gemma 4 26B A4B  (free)","description":"Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference — delivering near-31B quality at...","created":1775227989,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Gemma","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"google/gemma-4-31b-it","name":"Google: Gemma 4 31B","description":"Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function...","created":1775148486,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Gemma","pricing":{"prompt":1.3e-7,"completion":3.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"google/gemma-4-31b-it:free","name":"Google: Gemma 4 31B (free)","description":"Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function...","created":1775148486,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Gemma","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"google/lyria-3-clip-preview","name":"Google: Lyria 3 Clip Preview","description":"30 second duration clips are priced at $0.04 per clip. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate...","created":1774907255,"context_length":1048576,"modality":"text+image->text+audio","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"]},{"id":"google/lyria-3-pro-preview","name":"Google: Lyria 3 Pro Preview","description":"Full-length songs are priced at $0.08 per song. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate high-quality, 48kHz...","created":1774907286,"context_length":1048576,"modality":"text+image->text+audio","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["max_tokens","response_format","seed","temperature","top_p"]},{"id":"gryphe/mythomax-l2-13b","name":"MythoMax 13B","description":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay. #merge","created":1688256000,"context_length":4096,"modality":"text->text","instruct_type":"alpaca","tokenizer":"Llama2","pricing":{"prompt":6e-8,"completion":6e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_a","top_k","top_logprobs","top_p"]},{"id":"ibm-granite/granite-4.0-h-micro","name":"IBM: Granite 4.0 Micro","description":"Granite-4.0-H-Micro is a 3B parameter from the Granite 4 family of models. These models are the latest in a series of models released by IBM. They are fine-tuned for long...","created":1760927695,"context_length":131000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.7e-8,"completion":1.1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"ibm-granite/granite-4.1-8b","name":"IBM: Granite 4.1 8B","description":"Granite 4.1 8B is a dense, decoder-only 8-billion-parameter language model from IBM, part of the Granite 4.1 family. It supports a 131K-token context window and is designed for enterprise tasks...","created":1777577071,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":5e-8,"completion":1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"inception/mercury-2","name":"Inception: Mercury 2","description":"Mercury 2 is an extremely fast reasoning LLM, and the first reasoning diffusion LLM (dLLM). Instead of generating tokens sequentially, Mercury 2 produces and refines multiple tokens in parallel, achieving...","created":1772636275,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.5e-7,"completion":7.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":50000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","stop","structured_outputs","temperature","tool_choice","tools"]},{"id":"inclusionai/ling-2.6-1t:free","name":"inclusionAI: Ling-2.6-1T (free)","description":"Ling-2.6-1T is an instant (instruct) model from inclusionAI and the company’s trillion-parameter flagship, designed for real-world agents that require fast execution and high efficiency at scale. It uses a “fast...","created":1776948238,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"inclusionai/ling-2.6-flash","name":"inclusionAI: Ling-2.6-flash","description":"Ling-2.6-flash is an instant (instruct) model from inclusionAI with 104B total parameters and 7.4B active parameters, designed for real-world agents that require fast responses, strong execution, and high token efficiency....","created":1776795886,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":8e-8,"completion":2.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"inflection/inflection-3-pi","name":"Inflection: Inflection 3 Pi","description":"Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety. It has access to recent news, and excels in scenarios like customer support and roleplay. Pi...","created":1728604800,"context_length":8000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":1024,"is_moderated":false},"supported_parameters":["max_tokens","stop","temperature","top_p"]},{"id":"inflection/inflection-3-productivity","name":"Inflection: Inflection 3 Productivity","description":"Inflection 3 Productivity is optimized for following instructions. It is better for tasks requiring JSON output or precise adherence to provided guidelines. It has access to recent news. For emotional...","created":1728604800,"context_length":8000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":1024,"is_moderated":false},"supported_parameters":["max_tokens","stop","temperature","top_p"]},{"id":"kwaipilot/kat-coder-pro-v2","name":"Kwaipilot: KAT-Coder-Pro V2","description":"KAT-Coder-Pro V2 is the latest high-performance model in KwaiKAT’s KAT-Coder series, designed for complex enterprise-grade software engineering and SaaS integration. It builds on the agentic coding strengths of earlier versions,...","created":1774649310,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":80000,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"liquid/lfm-2-24b-a2b","name":"LiquidAI: LFM2-24B-A2B","description":"LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per...","created":1772048711,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3e-8,"completion":1.2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","stop","temperature","top_k","top_p"]},{"id":"liquid/lfm-2.5-1.2b-instruct:free","name":"LiquidAI: LFM2.5-1.2B-Instruct (free)","description":"LFM2.5-1.2B-Instruct is a compact, high-performance instruction-tuned model built for fast on-device AI. It delivers strong chat quality in a 1.2B parameter footprint, with efficient edge inference and broad runtime support.","created":1768927521,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"liquid/lfm-2.5-1.2b-thinking:free","name":"LiquidAI: LFM2.5-1.2B-Thinking (free)","description":"LFM2.5-1.2B-Thinking is a lightweight reasoning-focused model optimized for agentic tasks, data extraction, and RAG—while still running comfortably on edge devices. It supports long context (up to 32K tokens) and is...","created":1768927527,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"mancer/weaver","name":"Mancer: Weaver (alpha)","description":"An attempt to recreate Claude-style verbosity, but don't expect the same level of coherence or memory. Meant for use in roleplay/narrative situations.","created":1690934400,"context_length":8000,"modality":"text->text","instruct_type":"alpaca","tokenizer":"Llama2","pricing":{"prompt":7.5e-7,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":2000,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_a","top_k","top_logprobs","top_p"]},{"id":"meta-llama/llama-3-70b-instruct","name":"Meta: Llama 3 70B Instruct","description":"Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 70B instruct-tuned version was optimized for high quality dialogue usecases. It has demonstrated strong...","created":1713398400,"context_length":8192,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":5.1e-7,"completion":7.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8000,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"meta-llama/llama-3-8b-instruct","name":"Meta: Llama 3 8B Instruct","description":"Meta's latest class of model (Llama 3) launched with a variety of sizes & flavors. This 8B instruct-tuned version was optimized for high quality dialogue usecases. It has demonstrated strong...","created":1713398400,"context_length":8192,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":3e-8,"completion":4e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"meta-llama/llama-3.1-70b-instruct","name":"Meta: Llama 3.1 70B Instruct","description":"Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 70B instruct-tuned version is optimized for high quality dialogue usecases. It has demonstrated strong...","created":1721692800,"context_length":131072,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":4e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"meta-llama/llama-3.1-8b-instruct","name":"Meta: Llama 3.1 8B Instruct","description":"Meta's latest class of model (Llama 3.1) launched with a variety of sizes & flavors. This 8B instruct-tuned version is fast and efficient. It has demonstrated strong performance compared to...","created":1721692800,"context_length":16384,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":2e-8,"completion":5e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"meta-llama/llama-3.2-11b-vision-instruct","name":"Meta: Llama 3.2 11B Vision Instruct","description":"Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and...","created":1727222400,"context_length":131072,"modality":"text+image->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":2.45e-7,"completion":2.45e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"]},{"id":"meta-llama/llama-3.2-1b-instruct","name":"Meta: Llama 3.2 1B Instruct","description":"Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate...","created":1727222400,"context_length":60000,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":2.7e-8,"completion":2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"meta-llama/llama-3.2-3b-instruct","name":"Meta: Llama 3.2 3B Instruct","description":"Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it...","created":1727222400,"context_length":80000,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":5.1e-8,"completion":3.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"meta-llama/llama-3.2-3b-instruct:free","name":"Meta: Llama 3.2 3B Instruct (free)","description":"Llama 3.2 3B is a 3-billion-parameter multilingual large language model, optimized for advanced natural language processing tasks like dialogue generation, reasoning, and summarization. Designed with the latest transformer architecture, it...","created":1727222400,"context_length":131072,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","top_k","top_p"]},{"id":"meta-llama/llama-3.3-70b-instruct","name":"Meta: Llama 3.3 70B Instruct","description":"The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model...","created":1733506137,"context_length":131072,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":1e-7,"completion":3.2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"meta-llama/llama-3.3-70b-instruct:free","name":"Meta: Llama 3.3 70B Instruct (free)","description":"The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model...","created":1733506137,"context_length":65536,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"meta-llama/llama-4-maverick","name":"Meta: Llama 4 Maverick","description":"Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward...","created":1743881822,"context_length":1048576,"modality":"text+image->text","instruct_type":null,"tokenizer":"Llama4","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"meta-llama/llama-4-scout","name":"Meta: Llama 4 Scout","description":"Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input...","created":1743881519,"context_length":327680,"modality":"text+image->text","instruct_type":null,"tokenizer":"Llama4","pricing":{"prompt":8e-8,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"meta-llama/llama-guard-3-8b","name":"Llama Guard 3 8B","description":"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification)...","created":1739401318,"context_length":131072,"modality":"text->text","instruct_type":"none","tokenizer":"Llama3","pricing":{"prompt":4.8e-7,"completion":3e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"meta-llama/llama-guard-4-12b","name":"Meta: Llama Guard 4 12B","description":"Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM...","created":1745975193,"context_length":163840,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.8e-7,"completion":1.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"]},{"id":"microsoft/phi-4","name":"Microsoft: Phi 4","description":"[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed. At 14 billion...","created":1736489872,"context_length":16384,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6.5e-8,"completion":1.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_logprobs","top_p"]},{"id":"microsoft/wizardlm-2-8x22b","name":"WizardLM-2 8x22B","description":"WizardLM-2 8x22B is Microsoft AI's most advanced Wizard model. It demonstrates highly competitive performance compared to leading proprietary models, and it consistently outperforms all existing state-of-the-art opensource models. It is...","created":1713225600,"context_length":65535,"modality":"text->text","instruct_type":"vicuna","tokenizer":"Mistral","pricing":{"prompt":6.2e-7,"completion":6.2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8000,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"minimax/minimax-01","name":"MiniMax: MiniMax-01","description":"MiniMax-01 is a combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding. It has 456 billion parameters, with 45.9 billion parameters activated per inference, and can handle a context...","created":1736915462,"context_length":1000192,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2e-7,"completion":0.0000011,"image":null,"request":null},"top_provider":{"max_completion_tokens":1000192,"is_moderated":false},"supported_parameters":["max_tokens","temperature","top_p"]},{"id":"minimax/minimax-m1","name":"MiniMax: MiniMax M1","description":"MiniMax-M1 is a large-scale, open-weight reasoning model designed for extended context and high-efficiency inference. It leverages a hybrid Mixture-of-Experts (MoE) architecture paired with a custom \"lightning attention\" mechanism, allowing it...","created":1750200414,"context_length":1000000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4e-7,"completion":0.0000022,"image":null,"request":null},"top_provider":{"max_completion_tokens":40000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"minimax/minimax-m2","name":"MiniMax: MiniMax M2","description":"MiniMax-M2 is a compact, high-efficiency large language model optimized for end-to-end coding and agentic workflows. With 10 billion activated parameters (230 billion total), it delivers near-frontier intelligence across general reasoning,...","created":1761252093,"context_length":196608,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.55e-7,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":196608,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"minimax/minimax-m2-her","name":"MiniMax: MiniMax M2-her","description":"MiniMax M2-her is a dialogue-first large language model built for immersive roleplay, character-driven chat, and expressive multi-turn conversations. Designed to stay consistent in tone and personality, it supports rich message...","created":1769177239,"context_length":65536,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":2048,"is_moderated":false},"supported_parameters":["max_tokens","temperature","top_p"]},{"id":"minimax/minimax-m2.1","name":"MiniMax: MiniMax M2.1","description":"MiniMax-M2.1 is a lightweight, state-of-the-art large language model optimized for coding, agentic workflows, and modern application development. With only 10 billion activated parameters, it delivers a major jump in real-world...","created":1766454997,"context_length":196608,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2.9e-7,"completion":9.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":196608,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"minimax/minimax-m2.5","name":"MiniMax: MiniMax M2.5","description":"MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1...","created":1770908502,"context_length":196608,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.5e-7,"completion":0.00000115,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"minimax/minimax-m2.5:free","name":"MiniMax: MiniMax M2.5 (free)","description":"MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1...","created":1770908502,"context_length":196608,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","temperature","tools"]},{"id":"minimax/minimax-m2.7","name":"MiniMax: MiniMax M2.7","description":"MiniMax-M2.7 is a next-generation large language model designed for autonomous, real-world productivity and continuous improvement. Built to actively participate in its own evolution, M2.7 integrates advanced agentic capabilities through multi-agent...","created":1773836697,"context_length":196608,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"mistralai/codestral-2508","name":"Mistral: Codestral 2508","description":"Mistral's cutting-edge language model for coding released end of July 2025. Codestral specializes in low-latency, high-frequency tasks such as fill-in-the-middle (FIM), code correction and test generation.\n\n[Blog Post](https://mistral.ai/news/codestral-25-08)","created":1754079630,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":3e-7,"completion":9e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/devstral-2512","name":"Mistral: Devstral 2 2512","description":"Devstral 2 is a state-of-the-art open-source model by Mistral AI specializing in agentic coding. It is a 123B-parameter dense transformer model supporting a 256K context window. Devstral 2 supports exploring...","created":1765285419,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/devstral-medium","name":"Mistral: Devstral Medium","description":"Devstral Medium is a high-performance code generation and agentic reasoning model developed jointly by Mistral AI and All Hands AI. Positioned as a step up from Devstral Small, it achieves...","created":1752161321,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/devstral-small","name":"Mistral: Devstral Small 1.1","description":"Devstral Small 1.1 is a 24B parameter open-weight language model for software engineering agents, developed by Mistral AI in collaboration with All Hands AI. Finetuned from Mistral Small 3.1 and...","created":1752160751,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":1e-7,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/ministral-14b-2512","name":"Mistral: Ministral 3 14B 2512","description":"The largest model in the Ministral 3 family, Ministral 3 14B offers frontier capabilities and performance comparable to its larger Mistral Small 3.2 24B counterpart. A powerful and efficient language...","created":1764681735,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":2e-7,"completion":2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"mistralai/ministral-3b-2512","name":"Mistral: Ministral 3 3B 2512","description":"The smallest model in the Ministral 3 family, Ministral 3 3B is a powerful, efficient tiny language model with vision capabilities.","created":1764681560,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":1e-7,"completion":1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"mistralai/ministral-8b-2512","name":"Mistral: Ministral 3 8B 2512","description":"A balanced model in the Ministral 3 family, Ministral 3 8B is a powerful, efficient tiny language model with vision capabilities.","created":1764681654,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":1.5e-7,"completion":1.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"mistralai/mistral-7b-instruct-v0.1","name":"Mistral: Mistral 7B Instruct v0.1","description":"A 7.3B parameter model that outperforms Llama 2 13B on all benchmarks, with optimizations for speed and context length.","created":1695859200,"context_length":2824,"modality":"text->text","instruct_type":"mistral","tokenizer":"Mistral","pricing":{"prompt":1.1e-7,"completion":1.9e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"mistralai/mistral-large","name":"Mistral Large","description":"This is Mistral AI's flagship model, Mistral Large 2 (version `mistral-large-2407`). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)....","created":1708905600,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":0.000002,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-large-2407","name":"Mistral Large 2407","description":"This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)....","created":1731978415,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":0.000002,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-large-2411","name":"Mistral Large 2411","description":"Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411) It provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable...","created":1731978685,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":0.000002,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-large-2512","name":"Mistral: Mistral Large 3 2512","description":"Mistral Large 3 2512 is Mistral’s most capable model to date, featuring a sparse mixture-of-experts architecture with 41B active parameters (675B total), and released under the Apache 2.0 license.","created":1764624472,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":5e-7,"completion":0.0000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-medium-3","name":"Mistral: Mistral Medium 3","description":"Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost...","created":1746627341,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-medium-3-5","name":"Mistral: Mistral Medium 3.5","description":"Mistral Medium 3.5 is a dense 128B instruction-following model from Mistral AI. It supports text and image inputs with text output, and is designed for agentic workflows, coding, and complex...","created":1777570439,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":0.0000015,"completion":0.0000075,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-medium-3.1","name":"Mistral: Mistral Medium 3.1","description":"Mistral Medium 3.1 is an updated version of Mistral Medium 3, which is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances...","created":1755095639,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-nemo","name":"Mistral: Mistral Nemo","description":"A 12B parameter model with a 128k token context length built by Mistral in collaboration with NVIDIA. The model is multilingual, supporting English, French, German, Spanish, Italian, Portuguese, Chinese, Japanese,...","created":1721347200,"context_length":131072,"modality":"text->text","instruct_type":"mistral","tokenizer":"Mistral","pricing":{"prompt":2e-8,"completion":3e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"mistralai/mistral-saba","name":"Mistral: Saba","description":"Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional...","created":1739803239,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":2e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mistral-small-24b-instruct-2501","name":"Mistral: Mistral Small 3","description":"Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed...","created":1738255409,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":5e-8,"completion":8e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"mistralai/mistral-small-2603","name":"Mistral: Mistral Small 4","description":"Mistral Small 4 is the next major release in the Mistral Small family, unifying the capabilities of several flagship Mistral models into a single system. It combines strong reasoning from...","created":1773695685,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"mistralai/mistral-small-3.1-24b-instruct","name":"Mistral: Mistral Small 3.1 24B","description":"Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and...","created":1742238937,"context_length":128000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":3.5e-7,"completion":5.6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"mistralai/mistral-small-3.2-24b-instruct","name":"Mistral: Mistral Small 3.2 24B","description":"Mistral-Small-3.2-24B-Instruct-2506 is an updated 24B parameter model from Mistral optimized for instruction following, repetition reduction, and improved function calling. Compared to the 3.1 release, version 3.2 significantly improves accuracy on...","created":1750443016,"context_length":128000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":7.5e-8,"completion":2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"mistralai/mixtral-8x22b-instruct","name":"Mistral: Mixtral 8x22B Instruct","description":"Mistral's official instruct fine-tuned version of [Mixtral 8x22B](/models/mistralai/mixtral-8x22b). It uses 39B active parameters out of 141B, offering unparalleled cost efficiency for its size. Its strengths include: - strong math, coding,...","created":1713312000,"context_length":65536,"modality":"text->text","instruct_type":"mistral","tokenizer":"Mistral","pricing":{"prompt":0.000002,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/mixtral-8x7b-instruct","name":"Mistral: Mixtral 8x7B Instruct","description":"Mixtral 8x7B Instruct is a pretrained generative Sparse Mixture of Experts, by Mistral AI, for chat and instruction use. Incorporates 8 experts (feed-forward networks) for a total of 47 billion...","created":1702166400,"context_length":32768,"modality":"text->text","instruct_type":"mistral","tokenizer":"Mistral","pricing":{"prompt":5.4e-7,"completion":5.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"mistralai/pixtral-large-2411","name":"Mistral: Pixtral Large 2411","description":"Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). The model is able to understand documents, charts and natural images. The model is...","created":1731977388,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":0.000002,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"mistralai/voxtral-small-24b-2507","name":"Mistral: Voxtral Small 24B 2507","description":"Voxtral Small is an enhancement of Mistral Small 3, incorporating state-of-the-art audio input capabilities while retaining best-in-class text performance. It excels at speech transcription, translation and audio understanding. Input audio...","created":1761835144,"context_length":32000,"modality":"text+audio->text","instruct_type":null,"tokenizer":"Mistral","pricing":{"prompt":1e-7,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"moonshotai/kimi-k2","name":"MoonshotAI: Kimi K2 0711","description":"Kimi K2 Instruct is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass. It is optimized for...","created":1752263252,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":5.7e-7,"completion":0.0000023,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"moonshotai/kimi-k2-0905","name":"MoonshotAI: Kimi K2 0905","description":"Kimi K2 0905 is the September update of [Kimi K2 0711](moonshotai/kimi-k2). It is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32...","created":1757021147,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"moonshotai/kimi-k2-thinking","name":"MoonshotAI: Kimi K2 Thinking","description":"Kimi K2 Thinking is Moonshot AI’s most advanced open reasoning model to date, extending the K2 series into agentic, long-horizon reasoning. Built on the trillion-parameter Mixture-of-Experts (MoE) architecture introduced in...","created":1762440622,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-7,"completion":0.0000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"moonshotai/kimi-k2.5","name":"MoonshotAI: Kimi K2.5","description":"Kimi K2.5 is Moonshot AI's native multimodal model, delivering state-of-the-art visual coding capability and a self-directed agent swarm paradigm. Built on Kimi K2 with continued pretraining over approximately 15T mixed...","created":1769487076,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4.4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"moonshotai/kimi-k2.6","name":"MoonshotAI: Kimi K2.6","description":"Kimi K2.6 is Moonshot AI's next-generation multimodal model, designed for long-horizon coding, coding-driven UI/UX generation, and multi-agent orchestration. It handles complex end-to-end coding tasks across Python, Rust, and Go, and...","created":1776699402,"context_length":262142,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":7.4e-7,"completion":0.00000349,"image":null,"request":null},"top_provider":{"max_completion_tokens":262142,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"morph/morph-v3-fast","name":"Morph: Morph V3 Fast","description":"Morph's fastest apply model for code edits. ~10,500 tokens/sec with 96% accuracy for rapid code transformations. The model requires the prompt to be in the following format: <instruction>{instruction}</instruction> <code>{initial_code}</code> <update>{edit_snippet}</update>...","created":1751910002,"context_length":81920,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":8e-7,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":38000,"is_moderated":false},"supported_parameters":["max_tokens","stop","temperature"]},{"id":"morph/morph-v3-large","name":"Morph: Morph V3 Large","description":"Morph's high-accuracy apply model for complex code edits. ~4,500 tokens/sec with 98% accuracy for precise code transformations. The model requires the prompt to be in the following format: <instruction>{instruction}</instruction> <code>{initial_code}</code>...","created":1751910858,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":9e-7,"completion":0.0000019,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["max_tokens","stop","temperature"]},{"id":"nex-agi/deepseek-v3.1-nex-n1","name":"Nex AGI: DeepSeek V3.1 Nex N1","description":"DeepSeek V3.1 Nex-N1 is the flagship release of the Nex-N1 series — a post-trained model designed to highlight agent autonomy, tool use, and real-world productivity. Nex-N1 demonstrates competitive performance across...","created":1765204393,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":1.35e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":163840,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"nousresearch/hermes-2-pro-llama-3-8b","name":"NousResearch: Hermes 2 Pro - Llama-3 8B","description":"Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced...","created":1716768000,"context_length":8192,"modality":"text->text","instruct_type":"chatml","tokenizer":"Llama3","pricing":{"prompt":1.4e-7,"completion":1.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"nousresearch/hermes-3-llama-3.1-405b","name":"Nous: Hermes 3 405B Instruct","description":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the...","created":1723766400,"context_length":131072,"modality":"text->text","instruct_type":"chatml","tokenizer":"Llama3","pricing":{"prompt":0.000001,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"nousresearch/hermes-3-llama-3.1-405b:free","name":"Nous: Hermes 3 405B Instruct (free)","description":"Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the...","created":1723766400,"context_length":131072,"modality":"text->text","instruct_type":"chatml","tokenizer":"Llama3","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","top_k","top_p"]},{"id":"nousresearch/hermes-3-llama-3.1-70b","name":"Nous: Hermes 3 70B Instruct","description":"Hermes 3 is a generalist language model with many improvements over [Hermes 2](/models/nousresearch/nous-hermes-2-mistral-7b-dpo), including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the...","created":1723939200,"context_length":131072,"modality":"text->text","instruct_type":"chatml","tokenizer":"Llama3","pricing":{"prompt":3e-7,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"nousresearch/hermes-4-405b","name":"Nous: Hermes 4 405B","description":"Hermes 4 is a large-scale reasoning model built on Meta-Llama-3.1-405B and released by Nous Research. It introduces a hybrid reasoning mode, where the model can choose to deliberate internally with...","created":1756235463,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000001,"completion":0.000003,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","temperature","top_k","top_p"]},{"id":"nousresearch/hermes-4-70b","name":"Nous: Hermes 4 70B","description":"Hermes 4 70B is a hybrid reasoning model from Nous Research, built on Meta-Llama-3.1-70B. It introduces the same hybrid mode as the larger 405B release, allowing the model to either...","created":1756236182,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Llama3","pricing":{"prompt":1.3e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","temperature","top_k","top_p"]},{"id":"nvidia/llama-3.1-nemotron-70b-instruct","name":"NVIDIA: Llama 3.1 Nemotron 70B Instruct","description":"NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels...","created":1728950400,"context_length":131072,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":0.0000012,"completion":0.0000012,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"nvidia/llama-3.3-nemotron-super-49b-v1.5","name":"NVIDIA: Llama 3.3 Nemotron Super 49B V1.5","description":"Llama-3.3-Nemotron-Super-49B-v1.5 is a 49B-parameter, English-centric reasoning/chat model derived from Meta’s Llama-3.3-70B-Instruct with a 128K context. It’s post-trained for agentic workflows (RAG, tool calling) via SFT across math, code, science, and...","created":1760101395,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Llama3","pricing":{"prompt":1e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"nvidia/nemotron-3-nano-30b-a3b","name":"NVIDIA: Nemotron 3 Nano 30B A3B","description":"NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully...","created":1765731275,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":5e-8,"completion":2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":228000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"nvidia/nemotron-3-nano-30b-a3b:free","name":"NVIDIA: Nemotron 3 Nano 30B A3B (free)","description":"NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully...","created":1765731275,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","temperature","tool_choice","tools","top_p"]},{"id":"nvidia/nemotron-3-nano-omni-30b-a3b-reasoning:free","name":"NVIDIA: Nemotron 3 Nano Omni (free)","description":"NVIDIA Nemotron™ 3 Nano Omni is a 30B-A3B open multimodal model designed to function as a perception and context sub-agent in enterprise agent systems. It accepts text, image, video, and...","created":1777393095,"context_length":256000,"modality":"text+image+audio+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","temperature","tool_choice","tools","top_p"]},{"id":"nvidia/nemotron-3-super-120b-a12b","name":"NVIDIA: Nemotron 3 Super","description":"NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer...","created":1773245239,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":9e-8,"completion":4.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"nvidia/nemotron-3-super-120b-a12b:free","name":"NVIDIA: Nemotron 3 Super (free)","description":"NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer...","created":1773245239,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"nvidia/nemotron-nano-12b-v2-vl","name":"NVIDIA: Nemotron Nano 12B 2 VL","description":"NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s...","created":1761675565,"context_length":131072,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","top_k","top_p"]},{"id":"nvidia/nemotron-nano-12b-v2-vl:free","name":"NVIDIA: Nemotron Nano 12B 2 VL (free)","description":"NVIDIA Nemotron Nano 2 VL is a 12-billion-parameter open multimodal reasoning model designed for video understanding and document intelligence. It introduces a hybrid Transformer-Mamba architecture, combining transformer-level accuracy with Mamba’s...","created":1761675565,"context_length":128000,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","temperature","tool_choice","tools","top_p"]},{"id":"nvidia/nemotron-nano-9b-v2","name":"NVIDIA: Nemotron Nano 9B V2","description":"NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and...","created":1757106807,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4e-8,"completion":1.6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"nvidia/nemotron-nano-9b-v2:free","name":"NVIDIA: Nemotron Nano 9B V2 (free)","description":"NVIDIA-Nemotron-Nano-9B-v2 is a large language model (LLM) trained from scratch by NVIDIA, and designed as a unified model for both reasoning and non-reasoning tasks. It responds to user queries and...","created":1757106807,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"openai/gpt-3.5-turbo","name":"OpenAI: GPT-3.5 Turbo","description":"GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.","created":1685232000,"context_length":16385,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":5e-7,"completion":0.0000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-3.5-turbo-0613","name":"OpenAI: GPT-3.5 Turbo (older v0613)","description":"GPT-3.5 Turbo is OpenAI's fastest model. It can understand and generate natural language or code, and is optimized for chat and traditional completion tasks.\n\nTraining data up to Sep 2021.","created":1706140800,"context_length":4095,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000001,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-3.5-turbo-16k","name":"OpenAI: GPT-3.5 Turbo 16k","description":"This model offers four times the context length of gpt-3.5-turbo, allowing it to support approximately 20 pages of text in a single request at a higher cost. Training data: up...","created":1693180800,"context_length":16385,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000003,"completion":0.000004,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-3.5-turbo-instruct","name":"OpenAI: GPT-3.5 Turbo Instruct","description":"This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. Training data: up to Sep 2021.","created":1695859200,"context_length":4095,"modality":"text->text","instruct_type":"chatml","tokenizer":"GPT","pricing":{"prompt":0.0000015,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"]},{"id":"openai/gpt-4","name":"OpenAI: GPT-4","description":"OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning...","created":1685232000,"context_length":8191,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00003,"completion":0.00006,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-4-0314","name":"OpenAI: GPT-4 (older v0314)","description":"GPT-4-0314 is the first version of GPT-4 released, with a context length of 8,192 tokens, and was supported until June 14. Training data: up to Sep 2021.","created":1685232000,"context_length":8191,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00003,"completion":0.00006,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-4-1106-preview","name":"OpenAI: GPT-4 Turbo (older v1106)","description":"The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to April 2023.","created":1699228800,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00001,"completion":0.00003,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-4-turbo","name":"OpenAI: GPT-4 Turbo","description":"The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.\n\nTraining data: up to December 2023.","created":1712620800,"context_length":128000,"modality":"text+image->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00001,"completion":0.00003,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-4-turbo-preview","name":"OpenAI: GPT-4 Turbo Preview","description":"The preview GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Training data: up to Dec 2023. **Note:** heavily rate limited by OpenAI while...","created":1706140800,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00001,"completion":0.00003,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-4.1","name":"OpenAI: GPT-4.1","description":"GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and...","created":1744651385,"context_length":1047576,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000002,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"openai/gpt-4.1-mini","name":"OpenAI: GPT-4.1 Mini","description":"GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard...","created":1744651381,"context_length":1047576,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":4e-7,"completion":0.0000016,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":true},"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"openai/gpt-4.1-nano","name":"OpenAI: GPT-4.1 Nano","description":"For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million...","created":1744651369,"context_length":1047576,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":1e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":true},"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"openai/gpt-4o","name":"OpenAI: GPT-4o","description":"GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as...","created":1715558400,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"]},{"id":"openai/gpt-4o-2024-05-13","name":"OpenAI: GPT-4o (2024-05-13)","description":"GPT-4o (\"o\" for \"omni\") is OpenAI's latest AI model, supporting both text and image inputs with text outputs. It maintains the intelligence level of [GPT-4 Turbo](/models/openai/gpt-4-turbo) while being twice as...","created":1715558400,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000005,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"]},{"id":"openai/gpt-4o-2024-08-06","name":"OpenAI: GPT-4o (2024-08-06)","description":"The 2024-08-06 version of GPT-4o offers improved performance in structured outputs, with the ability to supply a JSON schema in the respone_format. Read more [here](https://openai.com/index/introducing-structured-outputs-in-the-api/). GPT-4o (\"o\" for \"omni\") is...","created":1722902400,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"]},{"id":"openai/gpt-4o-2024-11-20","name":"OpenAI: GPT-4o (2024-11-20)","description":"The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability. It’s also better at working with uploaded...","created":1732127594,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"]},{"id":"openai/gpt-4o-audio-preview","name":"OpenAI: GPT-4o Audio","description":"The gpt-4o-audio-preview model adds support for audio inputs as prompts. This enhancement allows the model to detect nuances within audio recordings and add depth to generated user experiences. Audio outputs...","created":1755233061,"context_length":128000,"modality":"text+audio->text+audio","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-4o-mini","name":"OpenAI: GPT-4o-mini","description":"GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs. As their most advanced small model, it is many multiples more affordable...","created":1721260800,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_completion_tokens","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"]},{"id":"openai/gpt-4o-mini-2024-07-18","name":"OpenAI: GPT-4o-mini (2024-07-18)","description":"GPT-4o mini is OpenAI's newest model after [GPT-4 Omni](/models/openai/gpt-4o), supporting both text and image inputs with text outputs. As their most advanced small model, it is many multiples more affordable...","created":1721260800,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p","web_search_options"]},{"id":"openai/gpt-4o-mini-search-preview","name":"OpenAI: GPT-4o-mini Search Preview","description":"GPT-4o mini Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.","created":1741818122,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["max_tokens","response_format","structured_outputs","web_search_options"]},{"id":"openai/gpt-4o-search-preview","name":"OpenAI: GPT-4o Search Preview","description":"GPT-4o Search Previewis a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.","created":1741817949,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["max_tokens","response_format","structured_outputs","web_search_options"]},{"id":"openai/gpt-5","name":"OpenAI: GPT-5","description":"GPT-5 is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and accuracy...","created":1754587413,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5-chat","name":"OpenAI: GPT-5 Chat","description":"GPT-5 Chat is designed for advanced, natural, multimodal, and context-aware conversations for enterprise applications.","created":1754587837,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["max_tokens","response_format","seed","structured_outputs"]},{"id":"openai/gpt-5-codex","name":"OpenAI: GPT-5 Codex","description":"GPT-5-Codex is a specialized version of GPT-5 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks....","created":1758643403,"context_length":400000,"modality":"text+image->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5-image","name":"OpenAI: GPT-5 Image","description":"[GPT-5](https://openrouter.ai/openai/gpt-5) Image combines OpenAI's GPT-5 model with state-of-the-art image generation capabilities. It offers major improvements in reasoning, code quality, and user experience while incorporating GPT Image 1's superior instruction following,...","created":1760447986,"context_length":400000,"modality":"text+image+file->text+image","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00001,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"]},{"id":"openai/gpt-5-image-mini","name":"OpenAI: GPT-5 Image Mini","description":"GPT-5 Image Mini combines OpenAI's advanced language capabilities, powered by [GPT-5 Mini](https://openrouter.ai/openai/gpt-5-mini), with GPT Image 1 Mini for efficient image generation. This natively multimodal model features superior instruction following, text...","created":1760624583,"context_length":400000,"modality":"text+image+file->text+image","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","top_logprobs","top_p"]},{"id":"openai/gpt-5-mini","name":"OpenAI: GPT-5 Mini","description":"GPT-5 Mini is a compact version of GPT-5, designed to handle lighter-weight reasoning tasks. It provides the same instruction-following and safety-tuning benefits as GPT-5, but with reduced latency and cost....","created":1754587407,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":2.5e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5-nano","name":"OpenAI: GPT-5 Nano","description":"GPT-5-Nano is the smallest and fastest variant in the GPT-5 system, optimized for developer tools, rapid interactions, and ultra-low latency environments. While limited in reasoning depth compared to its larger...","created":1754587402,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":5e-8,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5-pro","name":"OpenAI: GPT-5 Pro","description":"GPT-5 Pro is OpenAI’s most advanced model, offering major improvements in reasoning, code quality, and user experience. It is optimized for complex tasks that require step-by-step reasoning, instruction following, and...","created":1759776663,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000015,"completion":0.00012,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.1","name":"OpenAI: GPT-5.1","description":"GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5. It uses adaptive reasoning...","created":1763060305,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.1-chat","name":"OpenAI: GPT-5.1 Chat","description":"GPT-5.1 Chat (AKA Instant is the fast, lightweight member of the 5.1 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on...","created":1763060302,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.1-codex","name":"OpenAI: GPT-5.1-Codex","description":"GPT-5.1-Codex is a specialized version of GPT-5.1 optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks....","created":1763060298,"context_length":400000,"modality":"text+image->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.1-codex-max","name":"OpenAI: GPT-5.1-Codex-Max","description":"GPT-5.1-Codex-Max is OpenAI’s latest agentic coding model, designed for long-running, high-context software development tasks. It is based on an updated version of the 5.1 reasoning stack and trained on agentic...","created":1764878934,"context_length":400000,"modality":"text+image->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000125,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.1-codex-mini","name":"OpenAI: GPT-5.1-Codex-Mini","description":"GPT-5.1-Codex-Mini is a smaller and faster version of GPT-5.1-Codex","created":1763057820,"context_length":400000,"modality":"text+image->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":2.5e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.2","name":"OpenAI: GPT-5.2","description":"GPT-5.2 is the latest frontier-grade model in the GPT-5 series, offering stronger agentic and long context perfomance compared to GPT-5.1. It uses adaptive reasoning to allocate computation dynamically, responding quickly...","created":1765389775,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000175,"completion":0.000014,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.2-chat","name":"OpenAI: GPT-5.2 Chat","description":"GPT-5.2 Chat (AKA Instant) is the fast, lightweight member of the 5.2 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on...","created":1765389783,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000175,"completion":0.000014,"image":null,"request":null},"top_provider":{"max_completion_tokens":32000,"is_moderated":false},"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.2-codex","name":"OpenAI: GPT-5.2-Codex","description":"GPT-5.2-Codex is an upgraded version of GPT-5.1-Codex optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks....","created":1768409315,"context_length":400000,"modality":"text+image->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000175,"completion":0.000014,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.2-pro","name":"OpenAI: GPT-5.2 Pro","description":"GPT-5.2 Pro is OpenAI’s most advanced model, offering major improvements in agentic coding and long context performance over GPT-5 Pro. It is optimized for complex tasks that require step-by-step reasoning,...","created":1765389780,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000021,"completion":0.000168,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.3-chat","name":"OpenAI: GPT-5.3 Chat","description":"GPT-5.3 Chat is an update to ChatGPT's most-used model that makes everyday conversations smoother, more useful, and more directly helpful. It delivers more accurate answers with better contextualization and significantly...","created":1772564061,"context_length":128000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000175,"completion":0.000014,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["max_completion_tokens","max_tokens","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.3-codex","name":"OpenAI: GPT-5.3-Codex","description":"GPT-5.3-Codex is OpenAI’s most advanced agentic coding model, combining the frontier software engineering performance of GPT-5.2-Codex with the broader reasoning and professional knowledge capabilities of GPT-5.2. It achieves state-of-the-art results...","created":1771959164,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00000175,"completion":0.000014,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.4","name":"OpenAI: GPT-5.4","description":"GPT-5.4 is OpenAI’s latest frontier model, unifying the Codex and GPT lines into a single system. It features a 1M+ token context window (922K input, 128K output) with support for...","created":1772734352,"context_length":1050000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.4-image-2","name":"OpenAI: GPT-5.4 Image 2","description":"[GPT-5.4](https://openrouter.ai/openai/gpt-5.4) Image 2 combines OpenAI's GPT-5.4 model with state-of-the-art image generation capabilities from GPT Image 2. It enables rich multimodal workflows, allowing users to seamlessly move between reasoning, coding, and...","created":1776797528,"context_length":272000,"modality":"text+image+file->text+image","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000008,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","top_logprobs"]},{"id":"openai/gpt-5.4-mini","name":"OpenAI: GPT-5.4 Mini","description":"GPT-5.4 mini brings the core capabilities of GPT-5.4 to a faster, more efficient model optimized for high-throughput workloads. It supports text and image inputs with strong performance across reasoning, coding,...","created":1773748178,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":7.5e-7,"completion":0.0000045,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.4-nano","name":"OpenAI: GPT-5.4 Nano","description":"GPT-5.4 nano is the most lightweight and cost-efficient variant of the GPT-5.4 family, optimized for speed-critical and high-volume tasks. It supports text and image inputs and is designed for low-latency...","created":1773748187,"context_length":400000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":2e-7,"completion":0.00000125,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.4-pro","name":"OpenAI: GPT-5.4 Pro","description":"GPT-5.4 Pro is OpenAI's most advanced model, building on GPT-5.4's unified architecture with enhanced reasoning capabilities for complex, high-stakes tasks. It features a 1M+ token context window (922K input, 128K...","created":1772734366,"context_length":1050000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00003,"completion":0.00018,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.5","name":"OpenAI: GPT-5.5","description":"GPT-5.5 is OpenAI’s frontier model designed for complex professional workloads, building on GPT-5.4 with stronger reasoning, higher reliability, and improved token efficiency on hard tasks. It features a 1M+ token...","created":1777051893,"context_length":1050000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000005,"completion":0.00003,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_completion_tokens","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-5.5-pro","name":"OpenAI: GPT-5.5 Pro","description":"GPT-5.5 Pro is OpenAI’s high-capability model optimized for deep reasoning and accuracy on complex, high-stakes workloads. It features a 1M+ token context window (922K input, 128K output) with support for...","created":1777051896,"context_length":1050000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00003,"completion":0.00018,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/gpt-audio","name":"OpenAI: GPT Audio","description":"The gpt-audio model is OpenAI's first generally available audio model. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Audio is priced...","created":1768862569,"context_length":128000,"modality":"text+audio->text+audio","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000025,"completion":0.00001,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-audio-mini","name":"OpenAI: GPT Audio Mini","description":"A cost-efficient version of GPT Audio. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Input is priced at $0.60 per million...","created":1768859419,"context_length":128000,"modality":"text+audio->text+audio","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":6e-7,"completion":0.0000024,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":true},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/gpt-oss-120b","name":"OpenAI: gpt-oss-120b","description":"gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized...","created":1754414231,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":3.9e-8,"completion":1.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"openai/gpt-oss-120b:free","name":"OpenAI: gpt-oss-120b (free)","description":"gpt-oss-120b is an open-weight, 117B-parameter Mixture-of-Experts (MoE) language model from OpenAI designed for high-reasoning, agentic, and general-purpose production use cases. It activates 5.1B parameters per forward pass and is optimized...","created":1754414231,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","tool_choice","tools"]},{"id":"openai/gpt-oss-20b","name":"OpenAI: gpt-oss-20b","description":"gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for...","created":1754414229,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":3e-8,"completion":1.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"openai/gpt-oss-20b:free","name":"OpenAI: gpt-oss-20b (free)","description":"gpt-oss-20b is an open-weight 21B parameter model released by OpenAI under the Apache 2.0 license. It uses a Mixture-of-Experts (MoE) architecture with 3.6B active parameters per forward pass, optimized for...","created":1754414229,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","tool_choice","tools"]},{"id":"openai/gpt-oss-safeguard-20b","name":"OpenAI: gpt-oss-safeguard-20b","description":"gpt-oss-safeguard-20b is a safety reasoning model from OpenAI built upon gpt-oss-20b. This open-weight, 21B-parameter Mixture-of-Experts (MoE) model offers lower latency for safety tasks like content classification, LLM filtering, and trust...","created":1761752836,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":7.5e-8,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","stop","temperature","tool_choice","tools","top_p"]},{"id":"openai/o1","name":"OpenAI: o1","description":"The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding. The o1 model series is trained with large-scale reinforcement learning to reason...","created":1734459999,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000015,"completion":0.00006,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/o1-pro","name":"OpenAI: o1-pro","description":"The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide...","created":1742423211,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00015,"completion":0.0006,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs"]},{"id":"openai/o3","name":"OpenAI: o3","description":"o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following....","created":1744823457,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000002,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/o3-deep-research","name":"OpenAI: o3 Deep Research","description":"o3-deep-research is OpenAI's advanced model for deep research, designed to tackle complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.","created":1760129661,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00001,"completion":0.00004,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/o3-mini","name":"OpenAI: o3 Mini","description":"OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding. This model supports the `reasoning_effort` parameter, which can be set to...","created":1738351721,"context_length":200000,"modality":"text+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000011,"completion":0.0000044,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/o3-mini-high","name":"OpenAI: o3 Mini High","description":"OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high. o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and...","created":1739372611,"context_length":200000,"modality":"text+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000011,"completion":0.0000044,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/o3-pro","name":"OpenAI: o3 Pro","description":"The o-series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o3-pro model uses more compute to think harder and provide consistently...","created":1749598352,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.00002,"completion":0.00008,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/o4-mini","name":"OpenAI: o4 Mini","description":"OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning...","created":1744820942,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000011,"completion":0.0000044,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openai/o4-mini-deep-research","name":"OpenAI: o4 Mini Deep Research","description":"o4-mini-deep-research is OpenAI's faster, more affordable deep research model—ideal for tackling complex, multi-step research tasks.\n\nNote: This model always uses the 'web_search' tool which adds additional cost.","created":1760129642,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.000002,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"openai/o4-mini-high","name":"OpenAI: o4 Mini High","description":"OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining...","created":1744824212,"context_length":200000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"GPT","pricing":{"prompt":0.0000011,"completion":0.0000044,"image":null,"request":null},"top_provider":{"max_completion_tokens":100000,"is_moderated":true},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","seed","structured_outputs","tool_choice","tools"]},{"id":"openrouter/auto","name":"Auto Router","description":"\"Your prompt will be processed by a meta-model and routed to one of dozens of models (see below), optimizing for the best possible output. To see which model was used,...","created":1699401600,"context_length":2000000,"modality":"text+image+file+audio+video->text+image","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":-1,"completion":-1,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_completion_tokens","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p","web_search_options"]},{"id":"openrouter/bodybuilder","name":"Body Builder (beta)","description":"Transform your natural language requests into structured OpenRouter API request objects. Describe what you want to accomplish with AI models, and Body Builder will construct the appropriate API calls. Example:...","created":1764903653,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":-1,"completion":-1,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":[]},{"id":"openrouter/free","name":"Free Models Router","description":"The simplest way to get free inference. openrouter/free is a router that selects free models at random from the models available on OpenRouter. The router smartly filters for models that...","created":1769917427,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"openrouter/owl-alpha","name":"Owl Alpha","description":"Owl Alpha is a high-performance foundation model designed for agentic workloads. Natively supports tool use, and long-context tasks, with strong performance in code generation, automated workflows, and complex instruction execution....","created":1777398589,"context_length":1048756,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tools","top_k","top_p"]},{"id":"openrouter/pareto-code","name":"Pareto Code Router","description":"The Pareto Router is a way to have OpenRouter always pick a strong coding model for your needs without committing to a specific one. You express a single `min_coding_score` preference...","created":1776747900,"context_length":200000,"modality":"text->text","instruct_type":null,"tokenizer":"Router","pricing":{"prompt":-1,"completion":-1,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":[]},{"id":"perplexity/sonar","name":"Perplexity: Sonar","description":"Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources. It is designed for companies seeking to integrate lightweight question-and-answer features...","created":1738013808,"context_length":127072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000001,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","temperature","top_k","top_p","web_search_options"]},{"id":"perplexity/sonar-deep-research","name":"Perplexity: Sonar Deep Research","description":"Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers...","created":1741311246,"context_length":128000,"modality":"text->text","instruct_type":"deepseek-r1","tokenizer":"Other","pricing":{"prompt":0.000002,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","temperature","top_k","top_p","web_search_options"]},{"id":"perplexity/sonar-pro","name":"Perplexity: Sonar Pro","description":"Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) For enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibility, like...","created":1741312423,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":8000,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","temperature","top_k","top_p","web_search_options"]},{"id":"perplexity/sonar-pro-search","name":"Perplexity: Sonar Pro Search","description":"Exclusively available on the OpenRouter API, Sonar Pro's new Pro Search mode is Perplexity's most advanced agentic search system. It is designed for deeper reasoning and analysis. Pricing is based...","created":1761854366,"context_length":200000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":8000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","structured_outputs","temperature","top_k","top_p","web_search_options"]},{"id":"perplexity/sonar-reasoning-pro","name":"Perplexity: Sonar Reasoning Pro","description":"Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) Sonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT). Designed for...","created":1741313308,"context_length":128000,"modality":"text+image->text","instruct_type":"deepseek-r1","tokenizer":"Other","pricing":{"prompt":0.000002,"completion":0.000008,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","temperature","top_k","top_p","web_search_options"]},{"id":"poolside/laguna-m.1:free","name":"Poolside: Laguna M.1 (free)","description":"Laguna M.1 is the flagship coding agent model from [Poolside](https://poolside.ai), optimized for complex software engineering tasks. Designed for agentic coding workflows, it supports tool calling and reasoning, with a 128K...","created":1777388504,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools"]},{"id":"poolside/laguna-xs.2:free","name":"Poolside: Laguna XS.2 (free)","description":"Laguna XS.2 is the second-generation model in the XS size class from [Poolside](https://poolside.ai), their efficient coding agent series. It combines tool calling and reasoning capabilities with a compact footprint, offering...","created":1777389604,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools"]},{"id":"prime-intellect/intellect-3","name":"Prime Intellect: INTELLECT-3","description":"INTELLECT-3 is a 106B-parameter Mixture-of-Experts model (12B active) post-trained from GLM-4.5-Air-Base using supervised fine-tuning (SFT) followed by large-scale reinforcement learning (RL). It offers state-of-the-art performance for its size across math,...","created":1764212534,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":2e-7,"completion":0.0000011,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen-2.5-72b-instruct","name":"Qwen2.5 72B Instruct","description":"Qwen2.5 72B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and...","created":1726704000,"context_length":32768,"modality":"text->text","instruct_type":"chatml","tokenizer":"Qwen","pricing":{"prompt":3.6e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen-2.5-7b-instruct","name":"Qwen: Qwen2.5 7B Instruct","description":"Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and...","created":1729036800,"context_length":32768,"modality":"text->text","instruct_type":"chatml","tokenizer":"Qwen","pricing":{"prompt":4e-8,"completion":1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen-2.5-coder-32b-instruct","name":"Qwen2.5 Coder 32B Instruct","description":"Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5: - Significantly improvements in **code generation**, **code reasoning**...","created":1731368400,"context_length":32768,"modality":"text->text","instruct_type":"chatml","tokenizer":"Qwen","pricing":{"prompt":6.6e-7,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","temperature","top_k","top_p"]},{"id":"qwen/qwen-max","name":"Qwen: Qwen-Max","description":"Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion...","created":1738402289,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":0.00000104,"completion":0.00000416,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen-plus","name":"Qwen: Qwen-Plus","description":"Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.","created":1738409840,"context_length":1000000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":2.6e-7,"completion":7.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen-plus-2025-07-28","name":"Qwen: Qwen Plus 0728","description":"Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.","created":1757347599,"context_length":1000000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.6e-7,"completion":7.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen-plus-2025-07-28:thinking","name":"Qwen: Qwen Plus 0728 (thinking)","description":"Qwen Plus 0728, based on the Qwen3 foundation model, is a 1 million context hybrid reasoning model with a balanced performance, speed, and cost combination.","created":1757347599,"context_length":1000000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.6e-7,"completion":7.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen-turbo","name":"Qwen: Qwen-Turbo","description":"Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.","created":1738410974,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":3.25e-8,"completion":1.3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen-vl-max","name":"Qwen: Qwen VL Max","description":"Qwen VL Max is a visual understanding model with 7500 tokens context length. It excels in delivering optimal performance for a broader spectrum of complex tasks.","created":1738434304,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":5.2e-7,"completion":0.00000208,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen-vl-plus","name":"Qwen: Qwen VL Plus","description":"Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for...","created":1738731255,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":1.365e-7,"completion":4.095e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","top_p"]},{"id":"qwen/qwen2.5-vl-72b-instruct","name":"Qwen: Qwen2.5 VL 72B Instruct","description":"Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images.","created":1738410311,"context_length":32000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":2.5e-7,"completion":7.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"qwen/qwen3-14b","name":"Qwen: Qwen3 14B","description":"Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for...","created":1745876478,"context_length":40960,"modality":"text->text","instruct_type":"qwen3","tokenizer":"Qwen3","pricing":{"prompt":6e-8,"completion":2.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":40960,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3-235b-a22b","name":"Qwen: Qwen3 235B A22B","description":"Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a \"thinking\" mode for complex reasoning, math, and...","created":1745875757,"context_length":131072,"modality":"text->text","instruct_type":"qwen3","tokenizer":"Qwen3","pricing":{"prompt":4.55e-7,"completion":0.00000182,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3-235b-a22b-2507","name":"Qwen: Qwen3 235B A22B Instruct 2507","description":"Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following,...","created":1753119555,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":7.1e-8,"completion":1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3-235b-a22b-thinking-2507","name":"Qwen: Qwen3 235B A22B Thinking 2507","description":"Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144...","created":1753449557,"context_length":131072,"modality":"text->text","instruct_type":"qwen3","tokenizer":"Qwen3","pricing":{"prompt":1.495e-7,"completion":0.000001495,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-30b-a3b","name":"Qwen: Qwen3 30B A3B","description":"Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique...","created":1745878604,"context_length":40960,"modality":"text->text","instruct_type":"qwen3","tokenizer":"Qwen3","pricing":{"prompt":9e-8,"completion":4.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":20000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3-30b-a3b-instruct-2507","name":"Qwen: Qwen3 30B A3B Instruct 2507","description":"Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and...","created":1753806965,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":9e-8,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-30b-a3b-thinking-2507","name":"Qwen: Qwen3 30B A3B Thinking 2507","description":"Qwen3-30B-A3B-Thinking-2507 is a 30B parameter Mixture-of-Experts reasoning model optimized for complex tasks requiring extended multi-step thinking. The model is designed specifically for “thinking mode,” where internal reasoning traces are separated...","created":1756399192,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":8e-8,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-32b","name":"Qwen: Qwen3 32B","description":"Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue. It supports seamless switching between a \"thinking\" mode for...","created":1745875945,"context_length":40960,"modality":"text->text","instruct_type":"qwen3","tokenizer":"Qwen3","pricing":{"prompt":8e-8,"completion":2.4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":40960,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-8b","name":"Qwen: Qwen3 8B","description":"Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue. It supports seamless switching between \"thinking\" mode for math,...","created":1745876632,"context_length":40960,"modality":"text->text","instruct_type":"qwen3","tokenizer":"Qwen3","pricing":{"prompt":5e-8,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-coder","name":"Qwen: Qwen3 Coder 480B A35B","description":"Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over...","created":1753230546,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.2e-7,"completion":0.0000018,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-coder-30b-a3b-instruct","name":"Qwen: Qwen3 Coder 30B A3B Instruct","description":"Qwen3-Coder-30B-A3B-Instruct is a 30.5B parameter Mixture-of-Experts (MoE) model with 128 experts (8 active per forward pass), designed for advanced code generation, repository-scale understanding, and agentic tool use. Built on the...","created":1753972379,"context_length":160000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":7e-8,"completion":2.7e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-coder-flash","name":"Qwen: Qwen3 Coder Flash","description":"Qwen3 Coder Flash is Alibaba's fast and cost efficient version of their proprietary Qwen3 Coder Plus. It is a powerful coding agent model specializing in autonomous programming via tool calling...","created":1758115536,"context_length":1000000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1.95e-7,"completion":9.75e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3-coder-next","name":"Qwen: Qwen3 Coder Next","description":"Qwen3-Coder-Next is an open-weight causal language model optimized for coding agents and local development workflows. It uses a sparse MoE design with 80B total parameters and only 3B activated per...","created":1770164101,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":1.2e-7,"completion":8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-coder-plus","name":"Qwen: Qwen3 Coder Plus","description":"Qwen3 Coder Plus is Alibaba's proprietary version of the Open Source Qwen3 Coder 480B A35B. It is a powerful coding agent model specializing in autonomous programming via tool calling and...","created":1758662707,"context_length":1000000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":6.5e-7,"completion":0.00000325,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3-coder:free","name":"Qwen: Qwen3 Coder 480B A35B (free)","description":"Qwen3-Coder-480B-A35B-Instruct is a Mixture-of-Experts (MoE) code generation model developed by the Qwen team. It is optimized for agentic coding tasks such as function calling, tool use, and long-context reasoning over...","created":1753230546,"context_length":262000,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":262000,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-max","name":"Qwen: Qwen3 Max","description":"Qwen3-Max is an updated release built on the Qwen3 series, offering major improvements in reasoning, instruction following, multilingual support, and long-tail knowledge coverage compared to the January 2025 version. It...","created":1758662808,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":7.8e-7,"completion":0.0000039,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3-max-thinking","name":"Qwen: Qwen3 Max Thinking","description":"Qwen3-Max-Thinking is the flagship reasoning model in the Qwen3 series, designed for high-stakes cognitive tasks that require deep, multi-step reasoning. By significantly scaling model capacity and reinforcement learning compute, it...","created":1770671901,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":7.8e-7,"completion":0.0000039,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3-next-80b-a3b-instruct","name":"Qwen: Qwen3 Next 80B A3B Instruct","description":"Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual...","created":1757612213,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":9e-8,"completion":0.0000011,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-next-80b-a3b-instruct:free","name":"Qwen: Qwen3 Next 80B A3B Instruct (free)","description":"Qwen3-Next-80B-A3B-Instruct is an instruction-tuned chat model in the Qwen3-Next series optimized for fast, stable responses without “thinking” traces. It targets complex tasks across reasoning, code generation, knowledge QA, and multilingual...","created":1757612213,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-next-80b-a3b-thinking","name":"Qwen: Qwen3 Next 80B A3B Thinking","description":"Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic...","created":1757612284,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":9.75e-8,"completion":7.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-vl-235b-a22b-instruct","name":"Qwen: Qwen3 VL 235B A22B Instruct","description":"Qwen3-VL-235B-A22B Instruct is an open-weight multimodal model that unifies strong text generation with visual understanding across images and video. The Instruct model targets general vision-language use (VQA, document parsing, chart/table...","created":1758668687,"context_length":262144,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2e-7,"completion":8.8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-vl-235b-a22b-thinking","name":"Qwen: Qwen3 VL 235B A22B Thinking","description":"Qwen3-VL-235B-A22B Thinking is a multimodal model that unifies strong text generation with visual understanding across images and video. The Thinking model is optimized for multimodal reasoning in STEM and math....","created":1758668690,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.6e-7,"completion":0.0000026,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-vl-30b-a3b-instruct","name":"Qwen: Qwen3 VL 30B A3B Instruct","description":"Qwen3-VL-30B-A3B-Instruct is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Instruct variant optimizes instruction-following for general multimodal tasks. It excels in perception...","created":1759794476,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1.3e-7,"completion":5.2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-vl-30b-a3b-thinking","name":"Qwen: Qwen3 VL 30B A3B Thinking","description":"Qwen3-VL-30B-A3B-Thinking is a multimodal model that unifies strong text generation with visual understanding for images and videos. Its Thinking variant enhances reasoning in STEM, math, and complex tasks. It excels...","created":1759794479,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1.3e-7,"completion":0.00000156,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-vl-32b-instruct","name":"Qwen: Qwen3 VL 32B Instruct","description":"Qwen3-VL-32B-Instruct is a large-scale multimodal vision-language model designed for high-precision understanding and reasoning across text, images, and video. With 32 billion parameters, it combines deep visual perception with advanced text...","created":1761231332,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":1.04e-7,"completion":4.16e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["max_tokens","presence_penalty","response_format","seed","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3-vl-8b-instruct","name":"Qwen: Qwen3 VL 8B Instruct","description":"Qwen3-VL-8B-Instruct is a multimodal vision-language model from the Qwen3-VL series, built for high-fidelity understanding and reasoning across text, images, and video. It features improved multimodal fusion with Interleaved-MRoPE for long-horizon...","created":1760463308,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":8e-8,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3-vl-8b-thinking","name":"Qwen: Qwen3 VL 8B Thinking","description":"Qwen3-VL-8B-Thinking is the reasoning-optimized variant of the Qwen3-VL-8B multimodal model, designed for advanced visual and textual reasoning across complex scenes, documents, and temporal sequences. It integrates enhanced multimodal alignment and...","created":1760463746,"context_length":131072,"modality":"text+image->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1.17e-7,"completion":0.000001365,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3.5-122b-a10b","name":"Qwen: Qwen3.5-122B-A10B","description":"The Qwen3.5 122B-A10B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. In terms of...","created":1772053789,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.6e-7,"completion":0.00000208,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3.5-27b","name":"Qwen: Qwen3.5-27B","description":"The Qwen3.5 27B native vision-language Dense model incorporates a linear attention mechanism, delivering fast response times while balancing inference speed and performance. Its overall capabilities are comparable to those of...","created":1772053810,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1.95e-7,"completion":0.00000156,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3.5-35b-a3b","name":"Qwen: Qwen3.5-35B-A3B","description":"The Qwen3.5 Series 35B-A3B is a native vision-language model designed with a hybrid architecture that integrates linear attention mechanisms and a sparse mixture-of-experts model, achieving higher inference efficiency. Its overall...","created":1772053822,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1.5e-7,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3.5-397b-a17b","name":"Qwen: Qwen3.5 397B A17B","description":"The Qwen3.5 series 397B-A17B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. It delivers...","created":1771223018,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":3.9e-7,"completion":0.00000234,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3.5-9b","name":"Qwen: Qwen3.5-9B","description":"Qwen3.5-9B is a multimodal foundation model from the Qwen3.5 family, designed to deliver strong reasoning, coding, and visual understanding in an efficient 9B-parameter architecture. It uses a unified vision-language design...","created":1773152396,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":1e-7,"completion":1.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3.5-flash-02-23","name":"Qwen: Qwen3.5-Flash","description":"The Qwen3.5 native vision-language Flash models are built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. Compared to the...","created":1772053776,"context_length":1000000,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":6.5e-8,"completion":2.6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3.5-plus-02-15","name":"Qwen: Qwen3.5 Plus 2026-02-15","description":"The Qwen3.5 native vision-language series Plus models are built on a hybrid architecture that integrates linear attention mechanisms with sparse mixture-of-experts models, achieving higher inference efficiency. In a variety of...","created":1771229416,"context_length":1000000,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.6e-7,"completion":0.00000156,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3.5-plus-20260420","name":"Qwen: Qwen3.5 Plus 2026-04-20","description":"Qwen3.5 Plus (April 2026) is a large-scale multimodal language model from Alibaba. It accepts text, image, and video input and produces text output, with a 1M token context window. This...","created":1777261368,"context_length":1000000,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":4e-7,"completion":0.0000024,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3.6-27b","name":"Qwen: Qwen3.6 27B","description":"Qwen3.6 27B is a dense 27-billion-parameter language model from the Qwen Team at Alibaba, released in April 2026. It features hybrid multimodal capabilities — accepting text, image, and video inputs...","created":1777255064,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":3.2e-7,"completion":0.0000032,"image":null,"request":null},"top_provider":{"max_completion_tokens":81920,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"qwen/qwen3.6-35b-a3b","name":"Qwen: Qwen3.6 35B A3B","description":"Qwen3.6-35B-A3B is an open-weight multimodal model from Alibaba Cloud with 35 billion total parameters and 3 billion active parameters per token. It uses a hybrid sparse mixture-of-experts architecture combining Gated...","created":1777260255,"context_length":262144,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":1.5e-7,"completion":0.000001,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"qwen/qwen3.6-flash","name":"Qwen: Qwen3.6 Flash","description":"Qwen3.6 Flash is a fast, efficient language model from Alibaba's Qwen 3.6 series. It supports text, image, and video input with a 1M token context window. Tiered pricing kicks in...","created":1777261362,"context_length":1000000,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":2.5e-7,"completion":0.0000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"qwen/qwen3.6-max-preview","name":"Qwen: Qwen3.6 Max Preview","description":"Qwen3.6-Max-Preview is a proprietary frontier model from Alibaba Cloud built on a sparse mixture-of-experts architecture with approximately 1 trillion total parameters. It is optimized for agentic coding, tool use, and...","created":1777260242,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Qwen","pricing":{"prompt":0.00000104,"completion":0.00000624,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"qwen/qwen3.6-plus","name":"Qwen: Qwen3.6 Plus","description":"Qwen 3.6 Plus builds on a hybrid architecture that combines efficient linear attention with sparse mixture-of-experts routing, enabling strong scalability and high-performance inference. Compared to the 3.5 series, it delivers...","created":1775133557,"context_length":1000000,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Qwen3","pricing":{"prompt":3.25e-7,"completion":0.00000195,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","presence_penalty","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_p"]},{"id":"rekaai/reka-edge","name":"Reka Edge","description":"Reka Edge is an extremely efficient 7B multimodal vision-language model that accepts image/video+text inputs and generates text outputs. This model is optimized specifically to deliver industry-leading performance in image understanding,...","created":1774026965,"context_length":16384,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1e-7,"completion":1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"rekaai/reka-flash-3","name":"Reka Flash 3","description":"Reka Flash 3 is a general-purpose, instruction-tuned large language model with 21 billion parameters, developed by Reka. It excels at general chat, coding tasks, instruction-following, and function calling. Featuring a...","created":1741812813,"context_length":65536,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1e-7,"completion":2e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","seed","stop","temperature","top_k","top_p"]},{"id":"relace/relace-apply-3","name":"Relace: Relace Apply 3","description":"Relace Apply 3 is a specialized code-patching LLM that merges AI-suggested edits straight into your source files. It can apply updates from GPT-4o, Claude, and others into your files at...","created":1758891572,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":8.5e-7,"completion":0.00000125,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["max_tokens","seed","stop"]},{"id":"relace/relace-search","name":"Relace: Relace Search","description":"The relace-search model uses 4-12 `view_file` and `grep` tools in parallel to explore a codebase and return relevant files to the user request. In contrast to RAG, relace-search performs agentic...","created":1765213560,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000001,"completion":0.000003,"image":null,"request":null},"top_provider":{"max_completion_tokens":128000,"is_moderated":false},"supported_parameters":["max_tokens","seed","stop","temperature","tool_choice","tools","top_p"]},{"id":"sao10k/l3-euryale-70b","name":"Sao10k: Llama 3 Euryale 70B v2.1","description":"Euryale 70B v2.1 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). - Better prompt adherence. - Better anatomy / spatial awareness. - Adapts much better to unique and custom...","created":1718668800,"context_length":8192,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":0.00000148,"completion":0.00000148,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["frequency_penalty","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"sao10k/l3-lunaris-8b","name":"Sao10K: Llama 3 8B Lunaris","description":"Lunaris 8B is a versatile generalist and roleplaying model based on Llama 3. It's a strategic merge of multiple models, designed to balance creativity with improved logic and general knowledge....","created":1723507200,"context_length":8192,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":4e-8,"completion":5e-8,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_p"]},{"id":"sao10k/l3.1-70b-hanami-x1","name":"Sao10K: Llama 3.1 70B Hanami x1","description":"This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b).","created":1736302854,"context_length":16000,"modality":"text->text","instruct_type":null,"tokenizer":"Llama3","pricing":{"prompt":0.000003,"completion":0.000003,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"sao10k/l3.1-euryale-70b","name":"Sao10K: Llama 3.1 Euryale 70B v2.2","description":"Euryale L3.1 70B v2.2 is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.1](/models/sao10k/l3-euryale-70b).","created":1724803200,"context_length":131072,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":8.5e-7,"completion":8.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"sao10k/l3.3-euryale-70b","name":"Sao10K: Llama 3.3 Euryale 70B","description":"Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.2](/models/sao10k/l3-euryale-70b).","created":1734535928,"context_length":131072,"modality":"text->text","instruct_type":"llama3","tokenizer":"Llama3","pricing":{"prompt":6.5e-7,"completion":7.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_k","top_logprobs","top_p"]},{"id":"stepfun/step-3.5-flash","name":"StepFun: Step 3.5 Flash","description":"Step 3.5 Flash is StepFun's most capable open-source foundation model. Built on a sparse Mixture of Experts (MoE) architecture, it selectively activates only 11B of its 196B parameters per token....","created":1769728337,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1e-7,"completion":3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"switchpoint/router","name":"Switchpoint Router","description":"Switchpoint AI's router instantly analyzes your request and directs it to the optimal AI from an ever-evolving library. As the world of LLMs advances, our router gets smarter, ensuring you...","created":1752272899,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":8.5e-7,"completion":0.0000034,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","seed","stop","temperature","top_k","top_p"]},{"id":"tencent/hunyuan-a13b-instruct","name":"Tencent: Hunyuan A13B Instruct","description":"Hunyuan-A13B is a 13B active parameter Mixture-of-Experts (MoE) language model developed by Tencent, with a total parameter count of 80B and support for reasoning via Chain-of-Thought. It offers competitive benchmark...","created":1751987664,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.4e-7,"completion":5.7e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","top_k","top_p"]},{"id":"tencent/hy3-preview:free","name":"Tencent: Hy3 preview (free)","description":"Hy3 preview is a high-efficiency Mixture-of-Experts model from Tencent designed for agentic workflows and production use. It supports configurable reasoning levels across disabled, low, and high modes, allowing it to...","created":1776878150,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":262144,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"thedrummer/cydonia-24b-v4.1","name":"TheDrummer: Cydonia 24B V4.1","description":"Uncensored and creative writing model based on Mistral Small 3.2 24B with good recall, prompt adherence, and intelligence.","created":1758931878,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"thedrummer/rocinante-12b","name":"TheDrummer: Rocinante 12B","description":"Rocinante 12B is designed for engaging storytelling and rich prose. Early testers have reported: - Expanded vocabulary with unique and expressive word choices - Enhanced creativity for vivid narratives -...","created":1727654400,"context_length":32768,"modality":"text->text","instruct_type":"chatml","tokenizer":"Qwen","pricing":{"prompt":1.7e-7,"completion":4.3e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"thedrummer/skyfall-36b-v2","name":"TheDrummer: Skyfall 36B V2","description":"Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling.","created":1741636566,"context_length":32768,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":5.5e-7,"completion":8e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","max_tokens","presence_penalty","repetition_penalty","seed","stop","temperature","top_k","top_p"]},{"id":"thedrummer/unslopnemo-12b","name":"TheDrummer: UnslopNemo 12B","description":"UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios.","created":1731103448,"context_length":32768,"modality":"text->text","instruct_type":"mistral","tokenizer":"Mistral","pricing":{"prompt":4e-7,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"tngtech/deepseek-r1t2-chimera","name":"TNG: DeepSeek R1T2 Chimera","description":"DeepSeek-TNG-R1T2-Chimera is the second-generation Chimera model from TNG Tech. It is a 671 B-parameter mixture-of-experts text-generation model assembled from DeepSeek-AI’s R1-0528, R1, and V3-0324 checkpoints with an Assembly-of-Experts merge. The...","created":1751986985,"context_length":163840,"modality":"text->text","instruct_type":null,"tokenizer":"DeepSeek","pricing":{"prompt":3e-7,"completion":0.0000011,"image":null,"request":null},"top_provider":{"max_completion_tokens":163840,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"undi95/remm-slerp-l2-13b","name":"ReMM SLERP 13B","description":"A recreation trial of the original MythoMax-L2-B13 but with updated models. #merge","created":1689984000,"context_length":6144,"modality":"text->text","instruct_type":"alpaca","tokenizer":"Llama2","pricing":{"prompt":4.5e-7,"completion":6.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["frequency_penalty","logit_bias","logprobs","max_tokens","min_p","presence_penalty","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","top_a","top_k","top_logprobs","top_p"]},{"id":"upstage/solar-pro-3","name":"Upstage: Solar Pro 3","description":"Solar Pro 3 is Upstage's powerful Mixture-of-Experts (MoE) language model. With 102B total parameters and 12B active parameters per forward pass, it delivers exceptional performance while maintaining computational efficiency. Optimized...","created":1769481200,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.5e-7,"completion":6e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","structured_outputs","temperature","tool_choice","tools"]},{"id":"writer/palmyra-x5","name":"Writer: Palmyra X5","description":"Palmyra X5 is Writer's most advanced model, purpose-built for building and scaling AI agents across the enterprise. It delivers industry-leading speed and efficiency on context windows up to 1 million...","created":1769003823,"context_length":1040000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-7,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":8192,"is_moderated":true},"supported_parameters":["max_tokens","stop","temperature","top_k","top_p"]},{"id":"x-ai/grok-3","name":"xAI: Grok 3","description":"Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in...","created":1749582908,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-3-beta","name":"xAI: Grok 3 Beta","description":"Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in...","created":1744240068,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","logprobs","max_tokens","presence_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-3-mini","name":"xAI: Grok 3 Mini","description":"A lightweight model that thinks before responding. Fast, smart, and great for logic-based tasks that do not require deep domain knowledge. The raw thinking traces are accessible.","created":1749583245,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":3e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-3-mini-beta","name":"xAI: Grok 3 Mini Beta","description":"Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It’s ideal for reasoning-heavy tasks that don’t demand...","created":1744240195,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":3e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","stop","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-4","name":"xAI: Grok 4","description":"Grok 4 is xAI's latest reasoning model with a 256k context window. It supports parallel tool calling, structured outputs, and both image and text inputs. Note that reasoning is not...","created":1752087689,"context_length":256000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":0.000003,"completion":0.000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-4-fast","name":"xAI: Grok 4 Fast","description":"Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. It comes in two flavors: non-reasoning and reasoning. Read more about the model...","created":1758240090,"context_length":2000000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":2e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":30000,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-4.1-fast","name":"xAI: Grok 4.1 Fast","description":"Grok 4.1 Fast is xAI's best agentic tool calling model that shines in real-world use cases like customer support and deep research. 2M context window. Reasoning can be enabled/disabled using...","created":1763587502,"context_length":2000000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":2e-7,"completion":5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":30000,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-4.20","name":"xAI: Grok 4.20","description":"Grok 4.20 is xAI's newest flagship model with industry-leading speed and agentic tool calling capabilities. It combines the lowest hallucination rate on the market with strict prompt adherance, delivering consistently...","created":1774979019,"context_length":2000000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":0.00000125,"completion":0.0000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-4.20-multi-agent","name":"xAI: Grok 4.20 Multi-Agent","description":"Grok 4.20 Multi-Agent is a variant of xAI’s Grok 4.20 designed for collaborative, agent-based workflows. Multiple agents operate in parallel to conduct deep research, coordinate tool use, and synthesize information...","created":1774979158,"context_length":2000000,"modality":"text+image+file->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":0.000002,"completion":0.000006,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","structured_outputs","temperature","top_logprobs","top_p"]},{"id":"x-ai/grok-4.3","name":"xAI: Grok 4.3","description":"Grok 4.3 is a reasoning model from xAI. It accepts text and image inputs with text output, and is suited for agentic workflows, instruction-following tasks, and applications requiring high factual...","created":1777591821,"context_length":1000000,"modality":"text+image->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":0.00000125,"completion":0.0000025,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logprobs","max_tokens","presence_penalty","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"x-ai/grok-code-fast-1","name":"xAI: Grok Code Fast 1","description":"Grok Code Fast 1 is a speedy and economical reasoning model that excels at agentic coding. With reasoning traces visible in the response, developers can steer Grok Code for high-quality...","created":1756238927,"context_length":256000,"modality":"text->text","instruct_type":null,"tokenizer":"Grok","pricing":{"prompt":2e-7,"completion":0.0000015,"image":null,"request":null},"top_provider":{"max_completion_tokens":10000,"is_moderated":false},"supported_parameters":["include_reasoning","logprobs","max_tokens","reasoning","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_logprobs","top_p"]},{"id":"xiaomi/mimo-v2-flash","name":"Xiaomi: MiMo-V2-Flash","description":"MiMo-V2-Flash is an open-source foundation language model developed by Xiaomi. It is a Mixture-of-Experts model with 309B total parameters and 15B active parameters, adopting hybrid attention architecture. MiMo-V2-Flash supports a...","created":1765731308,"context_length":262144,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":9e-8,"completion":2.9e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"xiaomi/mimo-v2-omni","name":"Xiaomi: MiMo-V2-Omni","description":"MiMo-V2-Omni is a frontier omni-modal model that natively processes image, video, and audio inputs within a unified architecture. It combines strong multimodal perception with agentic capability - visual grounding, multi-step...","created":1773863703,"context_length":262144,"modality":"text+image+audio+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":65536,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"]},{"id":"xiaomi/mimo-v2-pro","name":"Xiaomi: MiMo-V2-Pro","description":"MiMo-V2-Pro is Xiaomi's flagship foundation model, featuring over 1T total parameters and a 1M context length, deeply optimized for agentic scenarios. It is highly adaptable to general agent frameworks like...","created":1773863643,"context_length":1048576,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000001,"completion":0.000003,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"]},{"id":"xiaomi/mimo-v2.5","name":"Xiaomi: MiMo-V2.5","description":"MiMo-V2.5 is a native omnimodal model by Xiaomi. It delivers Pro-level agentic performance at roughly half the inference cost, while surpassing MiMo-V2-Omni in multimodal perception across image and video understanding...","created":1776874269,"context_length":1048576,"modality":"text+image+audio+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":4e-7,"completion":0.000002,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"]},{"id":"xiaomi/mimo-v2.5-pro","name":"Xiaomi: MiMo-V2.5-Pro","description":"MiMo-V2.5-Pro is Xiaomi’s flagship model, delivering strong performance in general agentic capabilities, complex software engineering, and long-horizon tasks, with top rankings on benchmarks such as ClawEval, GDPVal, and SWE-bench Pro....","created":1776874273,"context_length":1048576,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.000001,"completion":0.000003,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","response_format","stop","temperature","tool_choice","tools","top_p"]},{"id":"z-ai/glm-4-32b","name":"Z.ai: GLM 4 32B","description":"GLM 4 32B is a cost-effective foundation language model. It can efficiently perform complex tasks and has significantly enhanced capabilities in tool use, online search, and code-related intelligent tasks. It...","created":1753376617,"context_length":128000,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1e-7,"completion":1e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["max_tokens","temperature","tool_choice","tools","top_p"]},{"id":"z-ai/glm-4.5","name":"Z.ai: GLM 4.5","description":"GLM-4.5 is our latest flagship foundation model, purpose-built for agent-based applications. It leverages a Mixture-of-Experts (MoE) architecture and supports a context length of up to 128k tokens. GLM-4.5 delivers significantly...","created":1753471347,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-7,"completion":0.0000022,"image":null,"request":null},"top_provider":{"max_completion_tokens":98304,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-4.5-air","name":"Z.ai: GLM 4.5 Air","description":"GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter...","created":1753471258,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":1.3e-7,"completion":8.5e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":98304,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-4.5-air:free","name":"Z.ai: GLM 4.5 Air (free)","description":"GLM-4.5-Air is the lightweight variant of our latest flagship model family, also purpose-built for agent-centric applications. Like GLM-4.5, it adopts the Mixture-of-Experts (MoE) architecture but with a more compact parameter...","created":1753471258,"context_length":131072,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0,"completion":0,"image":null,"request":null},"top_provider":{"max_completion_tokens":96000,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","temperature","tool_choice","tools","top_p"]},{"id":"z-ai/glm-4.5v","name":"Z.ai: GLM 4.5V","description":"GLM-4.5V is a vision-language foundation model for multimodal agent applications. Built on a Mixture-of-Experts (MoE) architecture with 106B parameters and 12B activated parameters, it achieves state-of-the-art results in video understanding,...","created":1754922288,"context_length":65536,"modality":"text+image->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-7,"completion":0.0000018,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-4.6","name":"Z.ai: GLM 4.6","description":"Compared with GLM-4.5, this generation brings several key improvements: Longer context window: The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex...","created":1759235576,"context_length":204800,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3.9e-7,"completion":0.0000019,"image":null,"request":null},"top_provider":{"max_completion_tokens":204800,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-4.6v","name":"Z.ai: GLM 4.6V","description":"GLM-4.6V is a large multimodal model designed for high-fidelity visual understanding and long-context reasoning across images, documents, and mixed media. It supports up to 128K tokens, processes complex page layouts...","created":1765207462,"context_length":131072,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3e-7,"completion":9e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":24000,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","max_tokens","presence_penalty","reasoning","repetition_penalty","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-4.7","name":"Z.ai: GLM 4.7","description":"GLM-4.7 is Z.ai’s latest flagship model, featuring upgrades in two key areas: enhanced programming capabilities and more stable multi-step reasoning/execution. It demonstrates significant improvements in executing complex agent tasks while...","created":1766378014,"context_length":202752,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":3.8e-7,"completion":0.00000174,"image":null,"request":null},"top_provider":{"max_completion_tokens":null,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"z-ai/glm-4.7-flash","name":"Z.ai: GLM 4.7 Flash","description":"As a 30B-class SOTA model, GLM-4.7-Flash offers a new option that balances performance and efficiency. It is further optimized for agentic coding use cases, strengthening coding capabilities, long-horizon task planning,...","created":1768833913,"context_length":202752,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-8,"completion":4e-7,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-5","name":"Z.ai: GLM 5","description":"GLM-5 is Z.ai’s flagship open-source foundation model engineered for complex systems design and long-horizon agent workflows. Built for expert developers, it delivers production-grade performance on large-scale programming tasks, rivaling leading...","created":1770829182,"context_length":202752,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":6e-7,"completion":0.00000208,"image":null,"request":null},"top_provider":{"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"z-ai/glm-5-turbo","name":"Z.ai: GLM 5 Turbo","description":"GLM-5 Turbo is a new model from Z.ai designed for fast inference and strong performance in agent-driven environments such as OpenClaw scenarios. It is deeply optimized for real-world agent workflows...","created":1773583573,"context_length":202752,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.0000012,"completion":0.000004,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","max_tokens","min_p","presence_penalty","reasoning","repetition_penalty","response_format","seed","stop","temperature","tool_choice","tools","top_k","top_p"]},{"id":"z-ai/glm-5.1","name":"Z.ai: GLM 5.1","description":"GLM-5.1 delivers a major leap in coding capability, with particularly significant gains in handling long-horizon tasks. Unlike previous models built around minute-level interactions, GLM-5.1 can work independently and continuously on...","created":1775578025,"context_length":202752,"modality":"text->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.00000105,"completion":0.0000035,"image":null,"request":null},"top_provider":{"max_completion_tokens":65535,"is_moderated":false},"supported_parameters":["frequency_penalty","include_reasoning","logit_bias","logprobs","max_tokens","min_p","parallel_tool_calls","presence_penalty","reasoning","reasoning_effort","repetition_penalty","response_format","seed","stop","structured_outputs","temperature","tool_choice","tools","top_k","top_logprobs","top_p"]},{"id":"z-ai/glm-5v-turbo","name":"Z.ai: GLM 5V Turbo","description":"GLM-5V-Turbo is Z.ai’s first native multimodal agent foundation model, built for vision-based coding and agent-driven tasks. It natively handles image, video, and text inputs, excels at long-horizon planning, complex coding,...","created":1775061458,"context_length":202752,"modality":"text+image+video->text","instruct_type":null,"tokenizer":"Other","pricing":{"prompt":0.0000012,"completion":0.000004,"image":null,"request":null},"top_provider":{"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["include_reasoning","max_tokens","reasoning","response_format","temperature","tool_choice","tools","top_p"]}],"summary":{"by_namespace":[{"namespace":"openai","count":64},{"namespace":"qwen","count":51},{"namespace":"google","count":31},{"namespace":"mistralai","count":25},{"namespace":"anthropic","count":14},{"namespace":"meta-llama","count":14},{"namespace":"deepseek","count":13},{"namespace":"z-ai","count":13},{"namespace":"nvidia","count":11},{"namespace":"x-ai","count":11},{"namespace":"minimax","count":8},{"namespace":"arcee-ai","count":7},{"namespace":"baidu","count":6},{"namespace":"nousresearch","count":6},{"namespace":"amazon","count":5},{"namespace":"moonshotai","count":5},{"namespace":"openrouter","count":5},{"namespace":"perplexity","count":5},{"namespace":"sao10k","count":5},{"namespace":"xiaomi","count":5}],"by_modality":{"text+image->text":66,"text+image+file+audio+video->text":13,"text+image+file->text":49,"text->text":199,"text+image+file+video->text":1,"text+image+video->text":25,"text+image->text+image":3,"text+image+file+audio->text":1,"text+image->text+audio":2,"text+audio->text":1,"text+image+audio+video->text":3,"text+audio->text+audio":3,"text+image+file->text+image":3,"text+file->text":2,"text+image+file+audio+video->text+image":1},"cheapest_input":{"id":"ibm-granite/granite-4.0-h-micro","usd_per_million":0.017},"cheapest_output":{"id":"meta-llama/llama-guard-3-8b","usd_per_million":0.03},"largest_context":{"id":"openrouter/auto","tokens":2000000},"free_tier_count":33}}}