Mirror of https://github.com/microsoft/vscode.git, synced 2026-04-25 11:08:51 +01:00.
* add integration tests for LanguageModelChat#sendRequest
* make sure errors are recreated when making LM requests
* disable test with a note for later
* fix remote integration tests
154 lines · 3.9 KiB · TypeScript
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

import 'mocha';
|
|
import * as assert from 'assert';
|
|
import * as vscode from 'vscode';
|
|
import { assertNoRpc, closeAllEditors, DeferredPromise, disposeAll } from '../utils';
|
|
|
|
|
|
suite('lm', function () {
|
|
|
|
let disposables: vscode.Disposable[] = [];
|
|
|
|
setup(function () {
|
|
disposables = [];
|
|
});
|
|
|
|
teardown(async function () {
|
|
assertNoRpc();
|
|
await closeAllEditors();
|
|
disposeAll(disposables);
|
|
});
|
|
|
|
|
|
test('lm request and stream', async function () {
|
|
|
|
let p: vscode.Progress<vscode.ChatResponseFragment> | undefined;
|
|
const defer = new DeferredPromise<void>();
|
|
|
|
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
|
|
async provideLanguageModelResponse(_messages, _options, _extensionId, progress, _token) {
|
|
p = progress;
|
|
return defer.p;
|
|
},
|
|
async provideTokenCount(_text, _token) {
|
|
return 1;
|
|
},
|
|
}, {
|
|
name: 'test-lm',
|
|
version: '1.0.0',
|
|
family: 'test',
|
|
vendor: 'test-lm-vendor',
|
|
maxInputTokens: 100,
|
|
maxOutputTokens: 100,
|
|
}));
|
|
|
|
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
|
|
assert.strictEqual(models.length, 1);
|
|
|
|
const request = await models[0].sendRequest([vscode.LanguageModelChatMessage.User('Hello')]);
|
|
|
|
// assert we have a request immediately
|
|
assert.ok(request);
|
|
assert.ok(p);
|
|
assert.strictEqual(defer.isSettled, false);
|
|
|
|
let streamDone = false;
|
|
let responseText = '';
|
|
|
|
const pp = (async () => {
|
|
for await (const chunk of request.text) {
|
|
responseText += chunk;
|
|
}
|
|
streamDone = true;
|
|
})();
|
|
|
|
assert.strictEqual(responseText, '');
|
|
assert.strictEqual(streamDone, false);
|
|
|
|
p.report({ index: 0, part: 'Hello' });
|
|
defer.complete();
|
|
|
|
await pp;
|
|
await new Promise(r => setTimeout(r, 1000));
|
|
|
|
assert.strictEqual(streamDone, true);
|
|
assert.strictEqual(responseText, 'Hello');
|
|
});
|
|
|
|
test('lm request fail', async function () {
|
|
|
|
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
|
|
async provideLanguageModelResponse(_messages, _options, _extensionId, _progress, _token) {
|
|
throw new Error('BAD');
|
|
},
|
|
async provideTokenCount(_text, _token) {
|
|
return 1;
|
|
},
|
|
}, {
|
|
name: 'test-lm',
|
|
version: '1.0.0',
|
|
family: 'test',
|
|
vendor: 'test-lm-vendor',
|
|
maxInputTokens: 100,
|
|
maxOutputTokens: 100,
|
|
}));
|
|
|
|
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
|
|
assert.strictEqual(models.length, 1);
|
|
|
|
try {
|
|
await models[0].sendRequest([vscode.LanguageModelChatMessage.User('Hello')]);
|
|
assert.ok(false, 'EXPECTED error');
|
|
} catch (error) {
|
|
assert.ok(error instanceof Error);
|
|
}
|
|
});
|
|
|
|
test('lm stream fail', async function () {
|
|
|
|
const defer = new DeferredPromise<void>();
|
|
|
|
disposables.push(vscode.lm.registerChatModelProvider('test-lm', {
|
|
async provideLanguageModelResponse(_messages, _options, _extensionId, _progress, _token) {
|
|
return defer.p;
|
|
},
|
|
async provideTokenCount(_text, _token) {
|
|
return 1;
|
|
},
|
|
}, {
|
|
name: 'test-lm',
|
|
version: '1.0.0',
|
|
family: 'test',
|
|
vendor: 'test-lm-vendor',
|
|
maxInputTokens: 100,
|
|
maxOutputTokens: 100,
|
|
}));
|
|
|
|
const models = await vscode.lm.selectChatModels({ id: 'test-lm' });
|
|
assert.strictEqual(models.length, 1);
|
|
|
|
const res = await models[0].sendRequest([vscode.LanguageModelChatMessage.User('Hello')]);
|
|
assert.ok(res);
|
|
|
|
const result = (async () => {
|
|
for await (const _chunk of res.text) {
|
|
|
|
}
|
|
})();
|
|
|
|
defer.error(new Error('STREAM FAIL'));
|
|
|
|
try {
|
|
await result;
|
|
assert.ok(false, 'EXPECTED error');
|
|
} catch (error) {
|
|
assert.ok(error);
|
|
// assert.ok(error instanceof Error); // todo@jrieken requires one more insiders
|
|
}
|
|
});
|
|
});
|