Add comments to the BYOK API (#262543)

Logan Ramos
2025-08-20 15:34:41 -04:00
committed by GitHub
parent 5b009d5328
commit eadd76d8df


@@ -7,10 +7,9 @@
declare module 'vscode' {
// TODO: Document all methods and types
// TODO@API name scheme
/**
* The provider version of {@linkcode LanguageModelChatRequestOptions}
*/
export interface LanguageModelChatRequestHandleOptions {
// initiator
@@ -42,8 +41,14 @@ declare module 'vscode' {
readonly toolMode?: LanguageModelChatToolMode;
}
/**
* All the information representing a single language model contributed by a {@linkcode LanguageModelChatProvider}.
*/
export interface LanguageModelChatInformation {
/**
* Unique identifier for the language model. Must be unique per provider, but not required to be globally unique.
*/
readonly id: string;
/**
@@ -73,25 +78,35 @@ declare module 'vscode' {
*/
readonly version: string;
/**
* The maximum number of tokens the model can accept as input.
*/
readonly maxInputTokens: number;
/**
* The maximum number of tokens the model is capable of producing.
*/
readonly maxOutputTokens: number;
/**
* When present, this gates the use of `requestLanguageModelAccess` behind an authorization flow where
* the user must approve of another extension accessing the models contributed by this extension.
* Additionally, the extension can provide a label that will be shown in the UI.
*
* The label should indicate why the user is being asked to approve access.
*/
requiresAuthorization?: true | { label: string };
// TODO@API maybe an enum, LanguageModelChatProviderPickerAvailability?
// TODO@API isPreselected proposed
/**
* Whether or not this will be selected by default in the model picker
* NOT BEING FINALIZED
*/
readonly isDefault?: boolean;
// TODO@API nuke
/**
* Whether or not the model will show up in the model picker immediately upon being made known via {@linkcode LanguageModelChatProvider.prepareLanguageModelChatInformation}.
* NOT BEING FINALIZED
*/
readonly isUserSelectable?: boolean;
readonly capabilities?: {
@@ -99,7 +114,10 @@ declare module 'vscode' {
// TODO@API have mimeTypes that you support
readonly vision?: boolean;
// TODO@API should be `boolean | number` so extensions can express how many tools they support
/**
* Whether tool calling is supported by the model.
* If a number is provided, that is the maximum number of tools a model can call.
*/
readonly toolCalling?: boolean | number;
};
@@ -108,13 +126,13 @@ declare module 'vscode' {
* The lower the order, the higher the category appears in the list.
* Has no effect if `isUserSelectable` is `false`.
*
* WON'T BE FINALIZED
*/
readonly category?: { label: string; order: number };
}
/**
* The provider version of {@link LanguageModelChatMessage}.
*/
export interface LanguageModelChatRequestMessage {
/**
@@ -134,6 +152,10 @@ declare module 'vscode' {
readonly name: string | undefined;
}
/**
* Represents a language model chat provider. A single provider can contribute multiple models in a one-provider-to-many-models relationship.
* For example, an OpenAI provider could contribute models such as gpt-5, o3, etc.
*/
export interface LanguageModelChatProvider<T extends LanguageModelChatInformation = LanguageModelChatInformation> {
/**
@@ -141,20 +163,53 @@ declare module 'vscode' {
*/
readonly onDidChangeLanguageModelInformation?: Event<void>;
// NOT cacheable (between reloads)
/**
* Get the list of available language models contributed by this provider
* @param options Options which specify the calling context of this function
* @param token A cancellation token which signals if the user cancelled the request or not
* @returns A promise that resolves to the list of available language models
*/
prepareLanguageModelChatInformation(options: PrepareLanguageModelChatModelOptions, token: CancellationToken): ProviderResult<T[]>;
/**
* Returns the response for a chat request
* @param model The language model to use
* @param messages The messages to include in the request
* @param options Options for the request
* @param progress The progress to emit the streamed response chunks to
* @param token A cancellation token for the request
*/
provideLanguageModelChatResponse(model: T, messages: readonly LanguageModelChatRequestMessage[], options: LanguageModelChatRequestHandleOptions, progress: Progress<LanguageModelTextPart | LanguageModelToolCallPart | LanguageModelDataPart | LanguageModelThinkingPart>, token: CancellationToken): Thenable<any>;
/**
* Returns the number of tokens for a given text using the model specific tokenizer logic
* @param model The language model to use
* @param text The text to count tokens for
* @param token A cancellation token for the request
* @returns A promise that resolves to the number of tokens
*/
provideTokenCount(model: T, text: string | LanguageModelChatRequestMessage, token: CancellationToken): Thenable<number>;
}
export namespace lm {
/**
* Registers a {@linkcode LanguageModelChatProvider}
* @param vendor The vendor for this provider. Must be globally unique
* @param provider The provider to register
* @returns A disposable that unregisters the provider when disposed
*/
export function registerLanguageModelChatProvider(vendor: string, provider: LanguageModelChatProvider): Disposable;
}
/**
* The list of options passed into {@linkcode LanguageModelChatProvider.prepareLanguageModelChatInformation}
*/
export interface PrepareLanguageModelChatModelOptions {
/**
* Whether the user should be prompted via some UI flow, or whether models should be resolved silently.
* If silent is true, some models may not be resolved due to missing information such as API keys.
*/
readonly silent: boolean;
}
}
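
For orientation, below is a minimal sketch of how an extension might implement and register a provider against this proposed API. It is not part of the commit: the 'example' vendor string, model metadata, and token estimate are placeholder assumptions, and the name and family fields come from parts of the declaration not shown in this diff.

import * as vscode from 'vscode';

type ResponsePart =
	| vscode.LanguageModelTextPart
	| vscode.LanguageModelToolCallPart
	| vscode.LanguageModelDataPart
	| vscode.LanguageModelThinkingPart;

// Placeholder model metadata; ids, limits, and capabilities are illustrative only.
const exampleModels: vscode.LanguageModelChatInformation[] = [
	{
		id: 'example-mini',   // must be unique per provider, not globally
		name: 'Example Mini', // assumed field, not visible in this diff hunk
		family: 'example',    // assumed field, not visible in this diff hunk
		version: '1.0.0',
		maxInputTokens: 128_000,
		maxOutputTokens: 16_000,
		capabilities: { toolCalling: true }
	}
];

class ExampleChatProvider implements vscode.LanguageModelChatProvider {

	async prepareLanguageModelChatInformation(
		options: vscode.PrepareLanguageModelChatModelOptions,
		_token: vscode.CancellationToken
	): Promise<vscode.LanguageModelChatInformation[]> {
		// When called silently, avoid any UI (e.g. prompting for an API key) and
		// only return models that can be resolved without user interaction.
		if (options.silent && !(await this.hasStoredCredentials())) {
			return [];
		}
		return exampleModels;
	}

	async provideLanguageModelChatResponse(
		model: vscode.LanguageModelChatInformation,
		messages: readonly vscode.LanguageModelChatRequestMessage[],
		_options: vscode.LanguageModelChatRequestHandleOptions,
		progress: vscode.Progress<ResponsePart>,
		_token: vscode.CancellationToken
	): Promise<void> {
		// A real provider would call its backing service here and report
		// streamed chunks through `progress`.
		progress.report(new vscode.LanguageModelTextPart(`(${model.id}) received ${messages.length} message(s)`));
	}

	async provideTokenCount(
		_model: vscode.LanguageModelChatInformation,
		text: string | vscode.LanguageModelChatRequestMessage,
		_token: vscode.CancellationToken
	): Promise<number> {
		// Crude placeholder estimate; a real provider would use the model's tokenizer.
		const value = typeof text === 'string' ? text : JSON.stringify(text);
		return Math.ceil(value.length / 4);
	}

	private async hasStoredCredentials(): Promise<boolean> {
		return false; // placeholder for reading a stored secret / API key
	}
}

export function activate(context: vscode.ExtensionContext) {
	// The vendor string must be globally unique; 'example' is a placeholder.
	context.subscriptions.push(
		vscode.lm.registerLanguageModelChatProvider('example', new ExampleChatProvider())
	);
}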