diff --git a/ts/linkPreviews/linkPreviewFetch.ts b/ts/linkPreviews/linkPreviewFetch.ts
index 80d9017d92..6932cd34fb 100644
--- a/ts/linkPreviews/linkPreviewFetch.ts
+++ b/ts/linkPreviews/linkPreviewFetch.ts
@@ -27,9 +27,9 @@ const MAX_CONTENT_TYPE_LENGTH_TO_PARSE = 100;
 
 // Though we'll accept HTML of any Content-Length (including no specified length), we
 // will only load some of the HTML. So we might start loading a 99 gigabyte HTML page
-// but only parse the first 500 kilobytes. However, if the Content-Length is less than
+// but only parse the first 1000 kilobytes. However, if the Content-Length is less than
 // this, we won't waste space.
-const MAX_HTML_BYTES_TO_LOAD = 500 * 1024;
+const MAX_HTML_BYTES_TO_LOAD = 1000 * 1024;
 
 // `x` is 8 bytes. Nothing else (meta tags, etc) will even fit, so we can ignore
 // it. This is mostly to protect us against empty response bodies.
diff --git a/ts/test-electron/linkPreviews/linkPreviewFetch_test.ts b/ts/test-electron/linkPreviews/linkPreviewFetch_test.ts
index 55a1084bd7..a9996f72d3 100644
--- a/ts/test-electron/linkPreviews/linkPreviewFetch_test.ts
+++ b/ts/test-electron/linkPreviews/linkPreviewFetch_test.ts
@@ -843,7 +843,7 @@ describe('link preview fetching', () => {
       sinon.assert.notCalled(shouldNeverBeCalled);
     });
 
-    it('stops reading bodies after 500 kilobytes', async function test() {
+    it('stops reading bodies after 1000 kilobytes', async function test() {
       const shouldNeverBeCalled = sinon.stub();
       const fakeFetch = stub().resolves(
         new Response(
@@ -855,6 +855,9 @@
           const spaces = new Uint8Array(250 * 1024).fill(32);
           yield spaces;
           yield spaces;
+          yield spaces;
+          yield spaces;
+          yield spaces;
           shouldNeverBeCalled();
           yield new TextEncoder().encode(
             '<meta property="og:description" content="should be ignored">'
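
Note (not part of the patch): MAX_HTML_BYTES_TO_LOAD caps how many bytes of a response body are read while scanning for link-preview metadata, regardless of the advertised Content-Length; the patch raises that cap from 500 KB to 1000 KB and adjusts the test's fake body accordingly. The sketch below illustrates one way such a byte cap can be enforced over a fetch() body stream. It is a minimal example under assumed names and APIs (the helper name loadBodyUpTo and a WHATWG ReadableStream body), not the implementation in linkPreviewFetch.ts.

// Illustrative sketch only — not the repository's code. Assumes the response
// exposes a WHATWG ReadableStream body; `loadBodyUpTo` is a hypothetical name.
async function loadBodyUpTo(
  response: Response,
  maxBytes: number
): Promise<Uint8Array> {
  const body = response.body;
  if (!body) {
    return new Uint8Array(0);
  }

  const reader = body.getReader();
  const chunks: Array<Uint8Array> = [];
  let bytesRead = 0;

  while (bytesRead < maxBytes) {
    const { done, value } = await reader.read();
    if (done || !value) {
      break;
    }
    // Keep at most `maxBytes` in total; trim the chunk that crosses the limit.
    const remaining = maxBytes - bytesRead;
    const chunk = value.length > remaining ? value.subarray(0, remaining) : value;
    chunks.push(chunk);
    bytesRead += chunk.length;
  }

  // Stop the underlying transfer once the cap is reached or the body ends.
  await reader.cancel().catch(() => undefined);

  // Concatenate the collected chunks into a single buffer.
  const result = new Uint8Array(bytesRead);
  let offset = 0;
  for (const chunk of chunks) {
    result.set(chunk, offset);
    offset += chunk.length;
  }
  return result;
}

// Example: read at most 1000 * 1024 bytes of HTML from a fetched page.
// const html = await loadBodyUpTo(await fetch(url), 1000 * 1024);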