Finalize the bring your own key api (#255892)

* Start small

* start on new byok api

* Some more api work

* Hook up more of the API

* A single file with no errors yay!

* Some cleanup

* Slow and steady progress

* More code

* Add resolve

* Hate everything

* More progress on rewriting id

* Some error fixing

* Fix null service

* Use claude to fix tests

* Fix tests

* Fix tests

* More test fixing

* Some setup

* Some model picker stuff

* Fix model picker with new BYOK api

* Remove support for contributing to model picker action bar

* Adjust to allow for user prompting

* Add event

* Fix conflicts
This commit is contained in:
Logan Ramos
2025-07-18 13:42:55 -04:00
committed by GitHub
parent a426831dba
commit 8551a1d51d
19 changed files with 699 additions and 508 deletions

View File

@@ -13,11 +13,11 @@ suite('lm', function () {
let disposables: vscode.Disposable[] = [];
const testProviderOptions: vscode.ChatResponseProviderMetadata = {
const testProviderOptions: vscode.LanguageModelChatInformation = {
id: 'test-lm',
name: 'test-lm',
version: '1.0.0',
family: 'test',
vendor: 'test-lm-vendor',
maxInputTokens: 100,
maxOutputTokens: 100,
};
@@ -38,15 +38,23 @@ suite('lm', function () {
let p: vscode.Progress<vscode.ChatResponseFragment2> | undefined;
const defer = new DeferredPromise<void>();
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
async provideLanguageModelResponse(_messages, _options, _extensionId, progress, _token) {
p = progress;
return defer.p;
},
async provideTokenCount(_text, _token) {
return 1;
},
}, testProviderOptions));
try {
disposables.push(vscode.lm.registerChatModelProvider('test-lm-vendor', {
async prepareLanguageModelChat(_options, _token) {
return [testProviderOptions];
},
async provideLanguageModelChatResponse(_model, _messages, _options, progress, _token) {
p = progress;
return defer.p;
},
async provideTokenCount(_model, _text, _token) {
return 1;
},
}));
} catch (e) {
assert.fail(`Failed to register chat model provider: ${e}`);
}
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
assert.strictEqual(models.length, 1);
@@ -83,14 +91,17 @@ suite('lm', function () {
test('lm request fail', async function () {
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
async provideLanguageModelResponse(_messages, _options, _extensionId, _progress, _token) {
disposables.push(vscode.lm.registerChatModelProvider('test-lm-vendor', {
async prepareLanguageModelChat(_options, _token) {
return [testProviderOptions];
},
async provideLanguageModelChatResponse(_model, _messages, _options, _progress, _token) {
throw new Error('BAD');
},
async provideTokenCount(_text, _token) {
async provideTokenCount(_model, _text, _token) {
return 1;
},
}, testProviderOptions));
}));
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
assert.strictEqual(models.length, 1);
@@ -107,14 +118,17 @@ suite('lm', function () {
const defer = new DeferredPromise<void>();
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
async provideLanguageModelResponse(_messages, _options, _extensionId, _progress, _token) {
disposables.push(vscode.lm.registerChatModelProvider('test-lm-vendor', {
async prepareLanguageModelChat(_options, _token) {
return [testProviderOptions];
},
async provideLanguageModelChatResponse(_model, _messages, _options, _progress, _token) {
return defer.p;
},
async provideTokenCount(_text, _token) {
async provideTokenCount(_model, _text, _token) {
return 1;
}
}, testProviderOptions));
}));
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
assert.strictEqual(models.length, 1);
@@ -142,14 +156,17 @@ suite('lm', function () {
test('LanguageModelError instance is not thrown to extensions#235322 (SYNC)', async function () {
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
provideLanguageModelResponse(_messages, _options, _extensionId, _progress, _token) {
disposables.push(vscode.lm.registerChatModelProvider('test-lm-vendor', {
async prepareLanguageModelChat(_options, _token) {
return [testProviderOptions];
},
provideLanguageModelChatResponse(_model, _messages, _options, _progress, _token) {
throw vscode.LanguageModelError.Blocked('You have been blocked SYNC');
},
async provideTokenCount(_text, _token) {
async provideTokenCount(_model, _text, _token) {
return 1;
}
}, testProviderOptions));
}));
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
assert.strictEqual(models.length, 1);
@@ -165,14 +182,17 @@ suite('lm', function () {
test('LanguageModelError instance is not thrown to extensions#235322 (ASYNC)', async function () {
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
async provideLanguageModelResponse(_messages, _options, _extensionId, _progress, _token) {
disposables.push(vscode.lm.registerChatModelProvider('test-lm-vendor', {
async prepareLanguageModelChat(_options, _token) {
return [testProviderOptions];
},
async provideLanguageModelChatResponse(_model, _messages, _options, _progress, _token) {
throw vscode.LanguageModelError.Blocked('You have been blocked ASYNC');
},
async provideTokenCount(_text, _token) {
async provideTokenCount(_model, _text, _token) {
return 1;
}
}, testProviderOptions));
}));
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
assert.strictEqual(models.length, 1);