|
| 1 | +import test from 'ava' |
| 2 | +import path from 'path' |
| 3 | + |
| 4 | +import loadEmscriptenModuleNode from '../../../dist/pipeline/internal/load-emscripten-module-node.js' |
| 5 | +import runPipelineEmscripten from '../../../dist/pipeline/internal/run-pipeline-emscripten.js' |
| 6 | +import InterfaceTypes from '../../../dist/interface-types/interface-types.js' |
| 7 | +import FloatTypes from '../../../dist/interface-types/float-types.js' |
| 8 | +import PixelTypes from '../../../dist/interface-types/pixel-types.js' |
| 9 | + |
| 10 | +/** |
| 11 | + * Regression test for signed pointer overflow in runPipelineEmscripten. |
| 12 | + * |
| 13 | + * When the WASM heap exceeds 2 GB, Emscripten's ccall returns pointers as |
| 14 | + * signed i32. Values above 2^31 wrap negative, causing: |
| 15 | + * |
| 16 | + * RangeError: Start offset -N is outside the bounds of the buffer |
| 17 | + * |
| 18 | + * This only happens when a module is REUSED across calls (the browser web |
| 19 | + * worker pattern). runPipelineNode creates a fresh module per call, so the |
| 20 | + * heap never accumulates. We use loadEmscriptenModuleNode + |
| 21 | + * runPipelineEmscripten directly to mirror the real-world scenario: |
| 22 | + * two large image reads on the same worker. |
| 23 | + * |
| 24 | + * Real-world trigger: VolView loading a session with a large NIfTI base |
| 25 | + * image + embedded labelmap on the same ITK-wasm web worker. |
| 26 | + */ |
| 27 | + |
// Path segments to the prebuilt median-filter Emscripten test pipeline,
// resolved to an absolute path against the current working directory.
const MEDIAN_FILTER_SEGMENTS = [
  'test',
  'pipelines',
  'emscripten-build',
  'median-filter-pipeline',
  'median-filter-test'
]
const MEDIAN_FILTER_PATH = path.resolve(...MEDIAN_FILTER_SEGMENTS)
| 35 | + |
/**
 * Build a zero-filled 3-D scalar Float32 test image of the given size.
 *
 * @param {number} dimX - Extent along the first axis.
 * @param {number} dimY - Extent along the second axis.
 * @param {number} dimZ - Extent along the third axis.
 * @returns {object} An ITK-wasm-style image object with unit spacing,
 *   zero origin, an identity direction matrix, and an all-zero
 *   Float32Array pixel buffer of dimX * dimY * dimZ elements.
 */
const createLargeFloat32Image = (dimX, dimY, dimZ) => {
  const voxelCount = dimX * dimY * dimZ
  // 3×3 identity direction matrix, row-major (ones on the diagonal).
  const identityDirection = Float64Array.from({ length: 9 }, (_, i) =>
    i % 4 === 0 ? 1 : 0
  )
  return {
    imageType: {
      dimension: 3,
      componentType: FloatTypes.Float32,
      pixelType: PixelTypes.Scalar,
      components: 1
    },
    name: 'large-test-image',
    origin: [0.0, 0.0, 0.0],
    spacing: [1.0, 1.0, 1.0],
    direction: identityDirection,
    size: [dimX, dimY, dimZ],
    data: new Float32Array(voxelCount),
    metadata: new Map()
  }
}
| 51 | + |
test('runPipelineEmscripten with reused module — second large image triggers signed pointer overflow', async (t) => {
  // Two ~800 MB pipeline runs plus WASM startup can take a while in CI.
  t.timeout(300_000)

  const pipelineModule = await loadEmscriptenModuleNode(MEDIAN_FILTER_PATH)

  // Current WASM heap size formatted as whole megabytes, for diagnostics.
  const heapMegabytes = () =>
    (pipelineModule.HEAPU8.buffer.byteLength / 1024 / 1024).toFixed(0)

  // A Float32 640×640×512 volume is ~800 MB per buffer. The first run grows
  // the heap to roughly 1.6 GB (input + output); the second run on the SAME
  // module pushes it past 2 GB, where output pointers exceed 2^31 and would
  // wrap negative without the unsigned-pointer fix under test.
  const volume = createLargeFloat32Image(640, 640, 512)

  const pipelineArgs = ['0', '0', '--radius', '1', '--memory-io']
  const desiredOutputs = [{ type: InterfaceTypes.Image }]
  const pipelineInputs = [{ type: InterfaceTypes.Image, data: volume }]

  // First pipeline run: grows the heap.
  const firstRun = runPipelineEmscripten(
    pipelineModule,
    pipelineArgs,
    desiredOutputs,
    pipelineInputs
  )
  t.is(firstRun.returnValue, 0, 'first pipeline run succeeds')
  t.truthy(firstRun.outputs[0].data.data, 'first run returns image data')
  t.log(`Heap after first run: ${heapMegabytes()} MB`)

  // Second pipeline run on the same module: heap accumulates past 2 GB.
  const secondRun = runPipelineEmscripten(
    pipelineModule,
    pipelineArgs,
    desiredOutputs,
    pipelineInputs
  )
  t.is(secondRun.returnValue, 0, 'second pipeline run succeeds')
  t.truthy(secondRun.outputs[0].data.data, 'second run returns image data')
  t.log(`Heap after second run: ${heapMegabytes()} MB`)
})
0 commit comments