{
  "data": [
    {
      "id": "openai/gpt-3.5-turbo",
      "canonical_slug": "openai/gpt-3.5-turbo",
      "hugging_face_id": null,
      "name": "OpenAI: GPT-3.5 Turbo",
      "created": 1695859200,
      "description": "This model is a variant of GPT-3.5 Turbo tuned for instructional prompts and omitting chat-related optimizations. Training data: up to Sep 2021.",
      "context_length": 4095,
      "architecture": {
        "modality": "text->text",
        "input_modalities": ["text"],
        "output_modalities": ["text"],
        "tokenizer": "GPT",
        "instruct_type": "chatml"
      },
      "pricing": {
        "prompt": "0.0000015",
        "completion": "0.000002",
        "request": "0",
        "image": "0",
        "web_search": "0",
        "internal_reasoning": "0"
      },
      "top_provider": {
        "context_length": 4095,
        "max_completion_tokens": 4096,
        "is_moderated": true
      },
      "per_request_limits": null,
      "supported_parameters": [
        "max_tokens",
        "temperature",
        "top_p",
        "stop",
        "frequency_penalty",
        "presence_penalty",
        "seed",
        "logit_bias",
        "logprobs",
        "top_logprobs",
        "response_format"
      ]
    },
    {
      "id": "openai/gpt-4",
      "canonical_slug": "openai/gpt-4",
      "hugging_face_id": null,
      "name": "OpenAI: GPT-4",
      "created": 1685232000,
      "description": "OpenAI's flagship model, GPT-4 is a large-scale multimodal language model capable of solving difficult problems with greater accuracy than previous models due to its broader general knowledge and advanced reasoning capabilities. Training data: up to Sep 2021.",
      "context_length": 8191,
      "architecture": {
        "modality": "text->text",
        "input_modalities": ["text"],
        "output_modalities": ["text"],
        "tokenizer": "GPT",
        "instruct_type": null
      },
      "pricing": {
        "prompt": "0.00003",
        "completion": "0.00006",
        "request": "0",
        "image": "0",
        "web_search": "0",
        "internal_reasoning": "0"
      },
      "top_provider": {
        "context_length": 8191,
        "max_completion_tokens": 4096,
        "is_moderated": true
      },
      "per_request_limits": null,
      "supported_parameters": [
        "max_tokens",
        "temperature",
        "top_p",
        "tools",
        "tool_choice",
        "stop",
        "frequency_penalty",
        "presence_penalty",
        "seed",
        "logit_bias",
        "logprobs",
        "top_logprobs",
        "structured_outputs",
        "response_format"
      ]
    }
  ]
}