mirror of
https://github.com/microsoft/vscode.git
synced 2026-05-16 13:21:04 +01:00
6b5334c5f4
* feat: make stream a caller-controlled passthrough in Messages API Allow callers to set stream: false via requestOptions instead of hardcoding stream: true. Add non-streaming response handler for the Anthropic Messages API that parses single JSON responses. - createMessagesRequestBody: stream: true → options.requestOptions?.stream ?? true - preparePostOptions: stream: true as default before spread (callers can override) - processResponseFromMessagesEndpoint: auto-detect via Content-Type header - processNonStreamingResponseFromMessagesEndpoint: new handler for JSON responses with tool call support in finishedCb delta, defensive parsing, cache-token consistency warning, unknown block type logging - Remove stale 'stream not respected' comment from fetch.ts - Remove stream: false from agentIntent.ts inline summarization - 10 new tests for non-streaming handler * fix: add telemetry parity for non-streaming path and bump cache salt * regenerate simulation cache for review-inline tests * Regenerate simulation cache after rebase * Temporarily disable multifile-edit-claude variant (#315940) claude-3.5-sonnet returns model_not_supported from the endpoint, breaking simulation cache regen. Re-enable when the test is updated to use a currently-supported Claude model. * Fix terminal strict-mode crash on empty suggestions + update baseline - terminal.stest.ts: guard strict-mode `ok()` predicate so when the model returns no code block, the test fails cleanly with the existing message instead of crashing with 'Cannot read properties of undefined (reading match)'. Also drop the stale commented-out debug block. - baseline.json: refresh scores (68.01 -> 68.69) and drop the 14 entries for the disabled multifile-edit-claude variant (see #315940). - Remove now-orphaned multifile-edit-claude-panel.json outcome file. 
* Apply CI-observed score improvements for cpp inline scenarios CI on Linux scores 4 cpp InlineChatIntent scenarios higher than my local macOS run does (likely platform-specific line-ending/whitespace normalization in the cpp grader). Update baseline.json to match the Linux scores: - edit-InlineChatIntent [inline] [cpp] - edit for cpp: 5 -> 9 - edit-InlineChatIntent [inline] [cpp] - edit for macro: 0 -> 2 - generate-InlineChatIntent [inline] [cpp] - cpp code generation: 3 -> 10 - generate-InlineChatIntent [inline] [cpp] - templated code gen: 0 -> 10 Overall score: 68.69 -> 68.86. * Populate cpp diagnostic cache via Docker for cross-platform parity The earlier rebase cache regen produced new LLM responses for the cpp inline tests but failed to populate the clang diagnostic provider cache for those new inputs, because clang detection on macOS is broken (Apple clang prints '-v' output to stderr, but findIfInstalled only checks stdout) and Docker wasn't running. As a result the cpp diagnostic cache was missing entries for the new LLM responses, and CI re-ran clang live on each platform with diverging results: - Linux CI: clang available, scored highest (9, 2, 10, 10) - Windows CI: no clang, errored out (5, 0, 10, 10 with worsening) - macOS: Apple clang misdetected as missing, Docker off, errored This commit: 1. Bumps CLANG_DIAGNOSTICS_PROVIDER_CACHE_SALT 5 -> 6 to invalidate any contaminated entries. 2. Adds two new cache layers populated by running cpp tests via Docker (using the mcr.microsoft.com/devcontainers/cpp image, same Linux clang as CI). All 14 cpp scenarios now produce deterministic, platform-independent diagnostic results when read from cache. Verified with --require-cache: all cpp scenarios pass without invoking clang/docker at runtime.
75 lines
1.9 KiB
TypeScript
75 lines
1.9 KiB
TypeScript
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

// These values are used as input for computing sha256 hashes for caching.
// Bump them to regenerate new cache entries or when the cache object shape changes.

/**
|
|
* Used for all ChatML requests (all models).
|
|
*/
|
|
export const CHAT_ML_CACHE_SALT_PER_MODEL: Record<string, string> = {
|
|
'DEFAULT': '2026-04-28T00:00:00Z',
|
|
'copilot-nes-oct': '2026-02-10T12:14:18.526Z',
|
|
};
|
|
|
|
/**
|
|
* Used for all NES requests.
|
|
*/
|
|
export const OPENAI_FETCHER_CACHE_SALT: { getByUrl: (url: string) => string } = new class {
|
|
private readonly _cacheSaltByUrl: Record<string, string> = Object.freeze({
|
|
// Other endpoints
|
|
'DEFAULT': '2024-09-25T11:25:00Z',
|
|
});
|
|
|
|
getByUrl(url: string): string {
|
|
if (url in this._cacheSaltByUrl) {
|
|
return this._cacheSaltByUrl[url];
|
|
} else {
|
|
return this._cacheSaltByUrl['DEFAULT'];
|
|
}
|
|
}
|
|
};
|
|
|
|
/**
|
|
* Used for all Code Search requests.
|
|
*/
|
|
export const CODE_SEARCH_CACHE_SALT = '';
|
|
|
|
/**
|
|
* Used for all diagnostics providers.
|
|
*/
|
|
export const CACHING_DIAGNOSTICS_PROVIDER_CACHE_SALT = 4;
|
|
|
|
/**
|
|
* Used by the clang diagnostics provider.
|
|
*/
|
|
export const CLANG_DIAGNOSTICS_PROVIDER_CACHE_SALT = 6;
|
|
|
|
/**
|
|
* Used by the TS diagnostics provider.
|
|
*/
|
|
export const TS_SERVER_DIAGNOSTICS_PROVIDER_CACHE_SALT = 5;
|
|
|
|
/**
|
|
* Used by `isValidPythonFile`.
|
|
*/
|
|
export const PYTHON_VALID_SYNTAX_CACHE_SALT = 2;
|
|
|
|
/**
|
|
* Used by `canExecutePythonCodeWithoutErrors`.
|
|
*/
|
|
export const PYTHON_EXECUTES_WITHOUT_ERRORS = 2;
|
|
|
|
/**
|
|
* Used by `isValidNotebookCell`.
|
|
*/
|
|
export const NOTEBOOK_CELL_VALID_CACHE_SALT = 1;
|
|
|
|
|
|
/**
|
|
* Used for all Chunking Endpoint requests.
|
|
*/
|
|
export const CHUNKING_ENDPOINT_CACHE_SALT = '';
|