more API todos (#251023)

https://github.com/microsoft/vscode/issues/250007
This commit is contained in:
Johannes Rieken
2025-06-09 20:30:06 +02:00
committed by GitHub
parent b2203d7a02
commit 4e037b6e1f

View File

@@ -5,10 +5,6 @@
declare module 'vscode' {
/**
 * A single fragment of a streaming language model response.
 */
export interface ChatResponseFragment2 {
// index of the response choice this fragment belongs to — presumably zero-based; TODO confirm
index: number;
// the payload: plain text emitted by the model, or a tool call the model requests
part: LanguageModelTextPart | LanguageModelToolCallPart;
}
// @API extensions ship a d.ts file for their options
@@ -16,6 +12,38 @@ declare module 'vscode' {
// concrete models. The `provideLanguageModelChatData` would do the discovery and auth dances and later
// the model data is passed to the concrete function for making a request or counting tokens
// TODO@API name scheme
/**
 * Options that accompany a request for a language model chat response.
 */
export interface LanguageModelChatResponseOptions {
// initiator: identifier of the extension that initiated this request
readonly extensionId: string;
/**
 * A set of options that control the behavior of the language model. These options are specific to the language model
 * and need to be looked up in the respective documentation.
 */
readonly modelOptions: { [name: string]: any };
/**
 * An optional list of tools that are available to the language model. These could be registered tools available via
 * {@link lm.tools}, or private tools that are just implemented within the calling extension.
 *
 * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in
 * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool
 * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}.
 *
 * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a
 * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}.
 */
tools?: LanguageModelChatTool[];
/**
 * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default.
 */
toolMode?: LanguageModelChatToolMode;
}
/**
 * Describes a concrete chat model offered by a provider.
 *
 * NOTE(review): intentionally empty for now — per the original comment this is
 * expected to mirror `ChatResponseProviderMetadata`; members to be filled in.
 */
export interface LanguageModelChatData {
// like ChatResponseProviderMetadata
}
@@ -24,11 +52,19 @@ declare module 'vscode' {
/**
 * Discover the chat models this provider offers.
 * @param options `force` presumably requests a fresh discovery/auth round-trip — TODO confirm
 * @param token a cancellation token
 * @returns metadata for each available model
 */
provideLanguageModelChatData(options: { force: boolean }, token: CancellationToken): ProviderResult<LanguageModelChatData[]>;
// older overload: takes the shared LanguageModelChatRequestOptions plus an explicit
// extensionId and streams indexed ChatResponseFragment2 values
provideResponse(model: LanguageModelChatData, messages: Array<LanguageModelChatMessage | LanguageModelChatMessage2>, options: LanguageModelChatRequestOptions, extensionId: string, progress: Progress<ChatResponseFragment2>, token: CancellationToken): Thenable<any>;
// newer overload: takes the provider-specific LanguageModelChatResponseOptions (which
// carries the extensionId) and streams parts directly, without a fragment wrapper
provideResponse(model: LanguageModelChatData, messages: Array<LanguageModelChatMessage | LanguageModelChatMessage2>, options: LanguageModelChatResponseOptions, progress: Progress<LanguageModelTextPart | LanguageModelToolCallPart>, token: CancellationToken): Thenable<any>;
/**
 * Count tokens for the given text or message as the given model would tokenize it.
 */
provideTokenCount(model: LanguageModelChatData, text: string | LanguageModelChatMessage | LanguageModelChatMessage2, token: CancellationToken): Thenable<number>;
}
/**
 * A single fragment of a streaming language model response.
 */
export interface ChatResponseFragment2 {
// index of the response choice this fragment belongs to — presumably zero-based; TODO confirm
index: number;
// the payload: plain text emitted by the model, or a tool call the model requests
part: LanguageModelTextPart | LanguageModelToolCallPart;
}
/**
* Represents a large language model that accepts ChatML messages and produces a streaming response
*/
@@ -37,6 +73,8 @@ declare module 'vscode' {
// TODO@API remove or keep proposed?
// fired after a response completes; payload carries the initiating extension, the
// participant (if any), and the token count when known
onDidReceiveLanguageModelResponse2?: Event<{ readonly extensionId: string; readonly participant?: string; readonly tokenCount?: number }>;
// TODO@API
// have dedicated options, don't reuse the LanguageModelChatRequestOptions so that consumer and provider part of the API can develop independently
/**
 * Produce a streaming response for the given messages, reporting indexed
 * fragments via `progress`.
 */
provideLanguageModelResponse(messages: Array<LanguageModelChatMessage | LanguageModelChatMessage2>, options: LanguageModelChatRequestOptions, extensionId: string, progress: Progress<ChatResponseFragment2>, token: CancellationToken): Thenable<any>;
/**
 * Count tokens for the given text or message.
 */
provideTokenCount(text: string | LanguageModelChatMessage | LanguageModelChatMessage2, token: CancellationToken): Thenable<number>;