diff --git a/.github/workflows/ghpages.yml b/.github/workflows/ghpages.yml
new file mode 100644
index 0000000..3780672
--- /dev/null
+++ b/.github/workflows/ghpages.yml
@@ -0,0 +1,21 @@
+name: Build and Deploy
+on:
+  push:
+    branches:
+      - main
+jobs:
+  build-and-publish-live-demo:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Install and Build
+        run: |
+          npm install
+          npm run build
+      - name: Deploy
+        uses: JamesIves/github-pages-deploy-action@v4
+        with:
+          branch: demo # The branch the action should deploy to.
+          folder: dist # The folder the action should deploy.
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..abd6c49
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2021 neuroneural/brainchop
+
+Ported to NiiVue 2024 NiiVue developers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6f3d608
--- /dev/null
+++ b/README.md
@@ -0,0 +1,46 @@
+# brain2print
+
+This is an extension of [brainchop](https://github.com/neuroneural/brainchop) that converts voxel-based MRI scans to 3D meshes that can be printed. No data is sent to a server. *Everything* happens in your browser window, on *your* machine.
+
+![image of web page](brain2print.png)
+
+## Usage
+
+1. Open the [live demo](https://niivue.github.io/brain2print/).
+2. **Option 1** The web page automatically loads with a default T1 MRI scan. If you want to use this scan, go to step 5.
+3. **Option 2** If your T1 MRI scan is in NIfTI format, drag and drop the file onto the web page.
+4. **Option 3** If your image is in DICOM format, it may load if you drag and drop the files. If this fails, convert your images with dcm2niix.
+5. Segment your brain scan by choosing a model from the `Segmentation Model` pull-down menu. Not all models work with all graphics cards. The `Tissue GWM (High Acc, Low Mem)` model is a good starting point. Hopefully, it will accurately segment your brain into gray matter, white matter, and cerebrospinal fluid.
+6.
 Press the `Create Mesh` button and select your preferred settings:
+
+   - ![settings dialog](Settings.png)
+
+   - [Closing](https://en.wikipedia.org/wiki/Closing_(morphology)) removes small crevices and cavities in your mesh, which can plague printing.
+   - `Fill bubbles` removes any internal cavities, including large ones such as the ventricles in a brain scan.
+   - `Largest cluster only` extracts a single mesh, keeping only the largest cluster.
+   - You can choose `Smoothing` to make the surfaces less jagged (note this can create self-intersecting triangles that can confound some printers).
+   - You can choose `Simplify` to [reduce the number of triangles](https://github.com/sp4cerat/Fast-Quadric-Mesh-Simplification) and create smaller files (note this can also create self-intersecting triangles that can confound some printers).
+7. Once you have set your preferences, press `Apply`.
+8. You will see the mesh appear and can interactively view it. If you are unhappy with the result, repeat step 6 with different settings. If you want to print the result, press the `Save Mesh` button.
+
+## How it Works
+
+This web application uses modern browser technologies that allow the tissue segmentation model to run on your local GPU, regardless of the type of GPU. This is possible via the `WebGPU` browser API. Additionally, we leverage `WebAssembly` to run the `niimath` [WASM wrapper](https://www.npmjs.com/package/@niivue/niimath), which turns the tissue segmentation into a 3D mesh. No data ever leaves your machine.
+
+### Developers - Running a Local Live Demo
+
+```bash
+git clone git@github.com:niivue/brain2print.git
+cd brain2print
+npm install
+npm run dev
+```
+
+
+### Developers - Building the Web Page
+
+```bash
+npm run build
+```
+
+
diff --git a/Settings.png b/Settings.png
new file mode 100644
index 0000000..41a9cf6
Binary files /dev/null and b/Settings.png differ
diff --git a/brain2print.png b/brain2print.png
new file mode 100644
index 0000000..0291ad6
Binary files /dev/null and b/brain2print.png differ
diff --git a/brainchop-mainthread.js b/brainchop-mainthread.js
new file mode 100644
index 0000000..435ef30
--- /dev/null
+++ b/brainchop-mainthread.js
@@ -0,0 +1,2159 @@
+import * as tf from '@tensorflow/tfjs'
+import { BWLabeler } from './bwlabels.js'
+import { inferenceModelsList } from './brainchop-parameters.js'
+export { runInference }
+
+async function getModelNumParameters(modelObj) {
+  let numParameters = 0
+  for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) {
+    numParameters += modelObj.layers[layerIdx].countParams()
+  }
+  return numParameters
+}
+
+async function getModelNumLayers(modelObj) {
+  return modelObj.layers.length
+}
+
+async function isModelChnlLast(modelObj) {
+  for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) {
+    if (modelObj.layersByDepth[layerIdx][0].dataFormat) {
+      return modelObj.layersByDepth[layerIdx][0].dataFormat === 'channelsLast'
+    }
+  }
+}
+
+async function load_model(modelUrl) {
+  console.log('main thread load_model', modelUrl)
+  return await tf.loadLayersModel(modelUrl)
+}
+
+async function getAllSlices2D(allSlices, slice_height, slice_width) {
+  const allSlices_2D = []
+  for (let sliceIdx = 0; sliceIdx < allSlices.length; sliceIdx++) {
+    allSlices_2D.push(tf.tensor(allSlices[sliceIdx], [slice_height, slice_width]))
+  }
+  return allSlices_2D
+}
+
+async function getSlices3D(allSlices_2D) {
+  return tf.stack(allSlices_2D)
+}
+
+async function getAllSlicesData1D(num_of_slices, niftiHeader, niftiImage) {
+  // Get nifti dimensions
+  const 
cols = niftiHeader.dims[1] // Slice width + const rows = niftiHeader.dims[2] // Slice height + let typedData + if (niftiHeader.datatypeCode === 2) { + // enum from nvimage/utils DT_UINT8 = 2 + typedData = new Uint8Array(niftiImage) + } else if (niftiHeader.datatypeCode === 4) { + // DT_INT16 = 4 + typedData = new Int16Array(niftiImage) + } else if (niftiHeader.datatypeCode === 8) { + // DT_INT32 = 8 + typedData = new Int32Array(niftiImage) + } else if (niftiHeader.datatypeCode === 16) { + // DT_FLOAT32 = 16 + typedData = new Float32Array(niftiImage) + } else if (niftiHeader.datatypeCode === 64) { + // DT_FLOAT64 = 64 + typedData = new Float64Array(niftiImage) + } else if (niftiHeader.datatypeCode === 256) { + // DT_INT8 = 256 + typedData = new Int8Array(niftiImage) + } else if (niftiHeader.datatypeCode === 512) { + // DT_UINT16 = 512 + typedData = new Uint16Array(niftiImage) + } else if (niftiHeader.datatypeCode === 768) { + // DT_UINT32 = 768 + typedData = new Uint32Array(niftiImage) + } else { + return + } + const allSlices = [] + let offset3D = 0 + // Draw pixels + for (let slice = 0; slice < num_of_slices; slice++) { + const slice = new Array(rows * cols) + let offset2D = 0 + for (let row = 0; row < rows; row++) { + for (let col = 0; col < cols; col++) { + const value = typedData[offset3D++] + // Create 1Dim Array of pixel value, this 1 dim represents one channel + slice[offset2D++] = value & 0xff + } + } + allSlices.push(slice) + } + return allSlices +} + +async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) { + // Flatten the tensor + const flatTensor = tensor.flatten() + + // Convert the flattened tensor to an array to sort it + const flatArray = await flatTensor.array() + flatArray.sort((a, b) => a - b) // Sort the array in ascending order + + // Convert the sorted array back to a tensor + const sortedTensor = tf.tensor1d(flatArray) + + // Calculate the indices for the quantiles + const numElements = sortedTensor.shape[0] + const lowIndex = Math.floor(numElements * lowerQuantile) + const highIndex = Math.ceil(numElements * upperQuantile) - 1 // Subtract 1 because indices are 0-based + + // Slice the sorted tensor to get qmin and qmax + const qmin = sortedTensor.slice(lowIndex, 1) // Get the value at the low index + const qmax = sortedTensor.slice(highIndex, 1) // Get the value at the high index + + // Get the actual values from the tensors + const qminValue = (await qmin.array())[0] + const qmaxValue = (await qmax.array())[0] + + // Clean up tensors to free memory + flatTensor.dispose() + sortedTensor.dispose() + qmin.dispose() + qmax.dispose() + + return { qmin: qminValue, qmax: qmaxValue } +} + +async function quantileNormalizeVolumeData(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) { + // Call calculateQuantiles and wait for the result + const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile) + + // Convert qmin and qmax back to scalars + const qminScalar = tf.scalar(qmin) + const qmaxScalar = tf.scalar(qmax) + + // Perform the operation: (tensor - qmin) / (qmax - qmin) + const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar)) + + // Dispose of the created scalars to free memory + qminScalar.dispose() + qmaxScalar.dispose() + + // Return the resulting tensor + return resultTensor +} + +async function minMaxNormalizeVolumeData(volumeData) { + // Normalize the data to the range 0 - 1 using min-max scaling + const volumeData_Max = volumeData.max() + const volumeData_Min = volumeData.min() + 
const normalizedSlices_3d = await volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min)) + return normalizedSlices_3d +} + +async function inferenceFullVolumeSeqCovLayer( + model, + slices_3d, + input_shape, + isChannelLast, + num_of_slices, + slice_height, + slice_width +) { + window.alert('inferenceFullVolumeSeqCovLayer() is not dead code?') +} + +async function inferenceFullVolume( + model, + slices_3d, + input_shape, + isChannelLast, + num_of_slices, + slice_height, + slice_width +) { + window.alert('inferenceFullVolume() is not dead code?') +} + +async function inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, pipeline1_out = null) { + window.alert('inferenceSubVolumes() is not dead code?') +} + +async function tensor2LightBuffer(tensor, dtype) { + window.alert('tensor2LightBuffer() is not dead code?') + // return new Buffer(tensor.shape, dtype, Array.from(tensor.dataSync()) ); +} + +async function draw3dObjBoundingVolume(unstackOutVolumeTensor) { + window.alert('draw3dObjBoundingVolume() is not dead code?') +} + +async function argMaxLarge(outVolumeBuffer, num_of_slices, slice_height, slice_width, numOfClasses, dtype = 'float32') { + window.alert('argMaxLarge() is not dead code?') +} + +async function addZeroPaddingTo3dTensor(tensor3d, rowPadArr = [1, 1], colPadArr = [1, 1], depthPadArr = [1, 1]) { + if (tensor3d.rank !== 3) { + throw new Error('Tensor must be 3D') + } + return tensor3d.pad([rowPadArr, colPadArr, depthPadArr]) +} + +async function removeZeroPaddingFrom3dTensor(tensor3d, rowPad = 1, colPad = 1, depthPad = 1) { + if (tensor3d.rank !== 3) { + throw new Error('Tensor must be 3D') + } + let h, w, d + ;[h, w, d] = tensor3d.shape + return tensor3d.slice([rowPad, colPad, depthPad], [h - 2 * rowPad, w - 2 * colPad, d - 2 * depthPad]) +} + +async function resizeWithZeroPadding(croppedTensor3d, newDepth, newHeight, newWidth, refVoxel, boundVolSizeArr) { + const row_pad_befor = refVoxel[0] + const col_pad_befor = refVoxel[1] + const depth_pad_befor = refVoxel[2] + // last and lower volume voxel + const row_max = row_pad_befor + boundVolSizeArr[0] - 1 // size [2, 2, 2] means 2 voxels total in each dim + const col_max = col_pad_befor + boundVolSizeArr[1] - 1 + const depth_max = depth_pad_befor + boundVolSizeArr[2] - 1 + + const row_pad_after = newHeight - row_max - 1 > 0 ? newHeight - row_max - 1 : 0 + const col_pad_after = newWidth - col_max - 1 > 0 ? newWidth - col_max - 1 : 0 + const depth_pad_after = newDepth - depth_max - 1 > 0 ? newDepth - depth_max - 1 : 0 + + return croppedTensor3d.pad([ + [row_pad_befor, row_pad_after], + [col_pad_befor, col_pad_after], + [depth_pad_befor, depth_pad_after] + ]) +} + +async function applyMriThreshold(tensor, percentage) { + // Perform asynchronous operations outside of tf.tidy + const maxTensor = tensor.max() + const thresholdTensor = maxTensor.mul(percentage) + const threshold = await thresholdTensor.data() // Extracts the threshold value + + // Dispose tensors not needed anymore + maxTensor.dispose() + thresholdTensor.dispose() + + // Use tf.tidy for synchronous operations + return tf.tidy(() => { + const dataForProcessing = tensor.clone() + + // Thresholding (assuming background has very low values compared to the head) + const mask = dataForProcessing.greater(threshold[0]) + // -- const denoisedMriData = dataForProcessing.mul(mask); + + // No need to manually dispose dataForProcessing and mask, as tf.tidy() will dispose them auto. 
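+    // The boolean mask marks voxels brighter than (max intensity * percentage);
+    // callers pass it to tf.whereAsync() to locate the brain bounding box.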
+ return mask + }) + + // -- return denoisedMriData; +} + +async function binarizeVolumeDataTensor(volumeDataTensor) { + const alpha = 0 + // element-wise: (x > 0 ? 1 : alpha * x ); e.g. Tenosr [0, 0.9, 0.8, -3] => Tensor [0, 1, 1, 0] + return volumeDataTensor.step(alpha) +} +async function generateBrainMask( + unstackOutVolumeTensor, + num_of_slices, + slice_height, + slice_width, + modelEntry, + opts, + callbackUI, + callbackImg, + isFinalImage = true +) { + console.log('Generate Brain Masking ... ') + // Convert all slices into 1 Dim array to download + + let allOutputSlices3DCC = [] + // const allOutputSlices3DContours = [] + + // dataSync() using to flatten array. Takes around 1.5 s + for (let sliceTensorIdx = 0; sliceTensorIdx < unstackOutVolumeTensor.length; sliceTensorIdx++) { + allOutputSlices3DCC[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync()) + } + const isPreModelPostProcessEnable = modelEntry.preModelPostProcess + // let isPreModelPostProcessEnable = inferenceModelsList[$$("selectModel").getValue() - 1]["preModelPostProcess"]; + + if (isPreModelPostProcessEnable) { + console.log('Phase-1 Post processing enabled ... ') + allOutputSlices3DCC = tf.tidy(() => { + // Remove noisy regions using 3d CC + // const sliceWidth = niftiHeader.dims[1] + // const sliceHeight = niftiHeader.dims[2] + // return postProcessSlices3D(allOutputSlices3DCC, slice_height, slice_width) + const errTxt = 'postProcessSlices3D() should be upgraded to BWLabeler' + callbackUI(errTxt, -1, errTxt) + }) + console.log('Post processing done ') + } else { + console.log('Phase-1 Post processing disabled ... ') + } + // Use this conversion to download output slices as nii file. Takes around 30 ms + // does not use `push` to avoid stack overflows. In future: consider .set() with typed arrays + const allOutputSlices3DCC1DimArray = new Array(allOutputSlices3DCC[0].length * allOutputSlices3DCC.length) + let index = 0; + for (let sliceIdx = 0; sliceIdx < allOutputSlices3DCC.length; sliceIdx++) { + for (let i = 0; i < allOutputSlices3DCC[sliceIdx].length; i++) { + allOutputSlices3DCC1DimArray[index++] = allOutputSlices3DCC[sliceIdx][i]; + } + } + let brainOut = [] + + if (opts.isBrainCropMaskBased) { + // Mask-based + + const brainMaskTensor1d = await binarizeVolumeDataTensor(tf.tensor1d(allOutputSlices3DCC1DimArray)) + brainOut = Array.from(brainMaskTensor1d.dataSync()) + } else { + // Brain tissue + window.alert('getAllSlicesData1D() is not dead code? 
niftiHeader and niftiImage required by getAllSlicesData1D') + } + if (isFinalImage || opts.showPhase1Output) {//all done + callbackImg(brainOut, opts, modelEntry) + callbackUI('Segmentation finished', 0) + } + return tf.tensor(brainOut, [num_of_slices, slice_height, slice_width]) +} + +async function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) { + // const batchSize = input.shape[0] + // const depth = input.shape[1] + // const height = input.shape[2] + // const width = input.shape[3] + const inChannels = input.shape[4] + const outChannels = filter.shape[4] + + // Create an empty array to hold the output channels + let outputChannels = null + + // Slice the input tensor and process one output channel at a time + for (let channel = 0; channel < outChannels; channel++) { + const numSlices = Math.ceil(inChannels / sliceSize) + const biasesSlice = biases.slice([channel], [1]) + let outputChannel = null + + for (let i = 0; i < numSlices; i++) { + const startChannel = i * sliceSize + const endChannel = Math.min((i + 1) * sliceSize, inChannels) + + // Only proceed if there are channels to process + if (startChannel < inChannels) { + const resultSlice = tf.tidy(() => { + const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel]) + const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1]) + // Perform the convolution for the current slice and output channel + return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate) + }) + + if (outputChannel === null) { + outputChannel = resultSlice + } else { + const updatedOutputChannel = outputChannel.add(resultSlice) + outputChannel.dispose() + resultSlice.dispose() + outputChannel = updatedOutputChannel + } + } + } + + // Add the biases to the accumulated convolutions for this channel + const biasedOutputChannel = outputChannel.add(biasesSlice) + outputChannel.dispose() + biasesSlice.dispose() + + // Accumulate the channel to the output array + if (outputChannels == null) { + outputChannels = biasedOutputChannel + } else { + const updatedOutputChannels = await tf.concat([outputChannels, biasedOutputChannel], 4) + biasedOutputChannel.dispose() + outputChannels.dispose() + outputChannels = updatedOutputChannels + } + } + + return outputChannels +} + +function processTensorInChunks(inputTensor, filterWeights, chunkSize) { + // Assuming inputTensor's shape: [batch, depth, height, width, inChannels] + // and filterWeights's shape: [filterDepth, filterHeight, filterWidth, inChannels, outChannels] + const stride = 1 + const pad = 0 + const dilationRate = 1 + const inChannels = inputTensor.shape[4] + const numSlices = Math.ceil(inChannels / chunkSize) + + let accumulatedResult = null + + for (let i = 0; i < numSlices; i++) { + const startChannel = i * chunkSize + const endChannel = Math.min((i + 1) * chunkSize, inChannels) + const channels = endChannel - startChannel + + const inputSlice = tf.tidy(() => { + // Slice the input tensor to get the current chunk + return inputTensor.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, channels]) + }) + + const filterSlice = tf.tidy(() => { + // Slice the filter weights to match the input tensor's current chunk + return filterWeights.slice([0, 0, 0, startChannel, 0], [-1, -1, -1, channels, -1]) + }) + + const resultSlice = tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate) + // Clean up the slices to free memory + inputSlice.dispose() + 
filterSlice.dispose() + + // Squeeze the result slice to remove dimensions of size 1 + const squeezedResultSlice = tf.squeeze(resultSlice) + resultSlice.dispose() // Dispose of the original resultSlice after squeezing + + if (accumulatedResult === null) { + accumulatedResult = squeezedResultSlice + } else { + // Accumulate the result by adding the new result slice to it + const newAccumulatedResult = accumulatedResult.add(squeezedResultSlice) + + // Dispose of the previous accumulatedResult and squeezedResultSlice + accumulatedResult.dispose() + // Dispose of squeezedResultSlice only if it wasn't assigned to accumulatedResult + if (accumulatedResult !== squeezedResultSlice) { + squeezedResultSlice.dispose() + } + // Update accumulatedResult with the new result + accumulatedResult = newAccumulatedResult + } + + tf.tidy(() => { + tf.matMul(tf.zeros([1, 1]), tf.zeros([1, 1])) + }) + } + + return accumulatedResult +} + +class SequentialConvLayer { + constructor(model, chunkSize, isChannelLast, callbackUI) { + this.model = model + this.outChannels = model.outputLayers[0].kernel.shape[4] + this.chunkSize = chunkSize + this.isChannelLast = isChannelLast + this.callbackUI = callbackUI // fork + } + + /** + * Apply sequential convolution layer + * @since 3.0.0 + * @member SequentialConvLayer + * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ] + * @return {promise} + * + * convLayer.rank -> 3 + * typeof(convLayer) -> "object" + * convLayer: Object { dataFormat: "channelsLast", dilationRate: Array(3) [ 1, 1, 1 ], inputSpec: Array [ {…} ], + * name: "output", padding: "same", strides: Array(3) [ 1, 1, 1 ], ...} + * + * weights.shape -> Array(5) [ 1, 1, 1, 5, 3 ] + * weights.print() + * //=> Tensor + * [[[[[0.146999 , -1.4474995, -2.8961499], + * [1.1067894, 0.6897876 , -0.7573005], + * [-0.38512 , -0.2812168, -0.8637539], + * [0.9341159, -0.0344299, -2.3668685], + * [0.1052373, 1.266812 , 0.6542516 ]]]]] + * + * biases.shape -> Array [ 3 ] + * biases.print() + * //=> Tensor + * [-0.7850812, -2.3238883, 2.1639345] + * + * for idx = 0 -> filterWeights.shape -> Array(5) [ 1, 1, 1, 5, 1 ] + * filterWeights.print() + * //=> Tensor + * [[[[[0.146999 ], + * [1.1067894], + * [-0.38512 ], + * [0.9341159], + * [0.1052373]]]]] + * + * for idx = 0 -> filterBiases.shape -> Array [1] + * filterBiases.print() + * //=> Tensor + * [-0.7850812] + + */ + + async apply(inputTensor) { + const oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD') + tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0) + + const self = this + // Important to avoid "undefined" class var members inside the timer. + // "this" has another meaning inside the timer. + + // document.getElementById("progressBarChild").parentElement.style.visibility = "visible"; + + return new Promise((resolve) => { + const startTime = performance.now() + + const convLayer = self.model.layers[self.model.layers.length - 1] + const weights = convLayer.getWeights()[0] // + const biases = convLayer.getWeights()[1] + const outputShape = self.isChannelLast ? inputTensor.shape.slice(1, -1) : inputTensor.shape.slice(2) + // -- e.g. outputShape : [256,256,256] or cropped Dim + // -- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W] + // -- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W] + + let outB = tf.mul(tf.ones(outputShape), -10000) + // -- e.g. outB.shape [256,256,256] + let outC = tf.zeros(outputShape) + // -- e.g. 
outC.shape [256,256,256] + let chIdx = 0 + + // console.log("---------------------------------------------------------"); + console.log(' channel loop') + + const seqTimer = window.setInterval(async function () { + tf.engine().startScope() // Start TensorFlow.js scope + console.log('=======================') + const memoryInfo0 = await tf.memory() + console.log(`| Number of Tensors: ${memoryInfo0.numTensors}`) + console.log(`| Number of Data Buffers: ${memoryInfo0.numDataBuffers}`) + console.log('Channel : ', chIdx) + + const result = await tf.tidy(() => { + const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1]) + // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ] + const filterBiases = biases.slice([chIdx], [1]) + // -- e.g. filterBiases.shape [1] -> Tensor [-0.7850812] + const outA = processTensorInChunks( + inputTensor, + filterWeights, + Math.min(self.chunkSize, self.outChannels) + ).add(filterBiases) + const greater = tf.greater(outA, outB) + const newoutB = tf.where(greater, outA, outB) + const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC) + // Dispose the old tensors before reassigning + tf.dispose([outB, outC, filterWeights, filterBiases, outA, greater]) + // Dummy operation to trigger cleanup + tf.tidy(() => tf.matMul(tf.ones([1, 1]), tf.ones([1, 1]))) + return [newoutC, newoutB] + }) + + console.log('=======================') + const memoryInfo = await tf.memory() + self.callbackUI(`Iteration ${chIdx}`, chIdx / self.outChannels) + console.log(`Number of Tensors: ${memoryInfo.numTensors}`) + console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`) + + console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`) + if (memoryInfo.unreliable) { + console.log(`Unreliable: ${memoryInfo.unreliable}`) + } + // Dispose of previous values before assigning new tensors to outC and outB + if (typeof outC !== 'undefined') { + outC.dispose() + } + if (typeof outB !== 'undefined') { + outB.dispose() + } + // Assign the new values to outC and outB + outC = tf.keep(result[0]) + outB = tf.keep(result[1]) + // // Assign the new values to outC and outB + // outC = result[0]; + // outB = result[1]; + tf.engine().endScope() + + if (chIdx === self.outChannels - 1) { + window.clearInterval(seqTimer) + // document.getElementById("progressBarChild").style.width = 0 + "%"; + tf.dispose(outB) + const endTime = performance.now() + const executionTime = endTime - startTime + console.log(`Execution time for output layer: ${executionTime} milliseconds`) + tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', oldDeleteTextureThreshold) + resolve(outC) + } else { + chIdx++ + + // the seemingly strange sequence of operations + // below prevents tfjs from uncontrolably + // grabbing buffers, even when all tensors have + // already been disposed + + const outCShape = outC.shape + const outCdata = outC.dataSync() + const outBShape = outC.shape + const outBdata = outB.dataSync() + outC.dispose() + outB.dispose() + // tf.disposeVariables() + outC = tf.tensor(outCdata, outCShape) + outB = tf.tensor(outBdata, outBShape) + + // document.getElementById("progressBarChild").style.width = (chIdx + 1) * 100 / self.outChannels + "%"; + } + + // Artificially introduce a pause to allow for garbage collection to catch up + await new Promise((resolve) => setTimeout(resolve, 300)) + }, 0) + }) + } +} // <<<< End of class + +async function generateOutputSlicesV2( + img, + OutVolumeTensorShape, + OutVolumeTensorType, + num_of_slices, + numSegClasses, + slice_height, + 
slice_width, + modelEntry, + opts, + niftiImage +) { + // Convert all slices into 1 Dim array + // const allOutputSlices3DContours = [] + if (opts.isPostProcessEnable) { + const BWInstance = new BWLabeler() + const dim = new Uint32Array(OutVolumeTensorShape) + const conn = 26 // Example connectivity + const binarize = true + const onlyLargestClusterPerClass = true + const [labelCount, labeledImage] = BWInstance.bwlabel(img, dim, conn, binarize, onlyLargestClusterPerClass) + for (let i = 0; i < img.length; i++) { + img[i] *= labeledImage[i] + } + } // if isPostProcessEnable + const typedArrayConstructor = { + float32: Float32Array, + int32: Int32Array + // Add other cases as needed for different dtypes + }[OutVolumeTensorType] + // Create a new TypedArray from img with the same type as outLabelVolume + const allOutputSlices3DCC1DimArray = new Uint8Array(img) + + const modelType = modelEntry.type + + // return img + switch (modelType) { + case 'Brain_Masking': { + const brainMask = new Uint8Array(allOutputSlices3DCC1DimArray.length) + for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) { + brainMask[i] = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0 + } + // labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, brainMask); + // allOutputSlices3DCC1DimArray = brainMask; + // --labelsHistogramMap = null; + // maskBrainExtraction = true; + return brainMask + // break; + } + case 'Brain_Extraction': { + const maskedData = new Uint8Array(allOutputSlices3DCC1DimArray.length) + // const brainData = nifti2data(rawNiftiData); + + for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) { + // Create the mask - 1 where the value is non-zero, 0 where it is zero. + const maskValue = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0 + // Apply the mask to the data - multiply by the mask value. + maskedData[i] = niftiImage[i] * maskValue + } + // labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, maskedData); + + // Update `allOutputSlices3DCC1DimArray` if needed. + // allOutputSlices3DCC1DimArray = maskedData; + + // Other operations... + // maskBrainExtraction = true; + return maskedData + // break; + } + } + + return img +} + +async function inferenceFullVolumeSeqCovLayerPhase2( + opts, + modelEntry, + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + pipeline1_out, + callbackUI, + callbackImg, + statData, + niftiImage +) { + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + + console.log(' ---- Start FullVolume Inference with Sequential Conv Layer for phase-II ---- ') + // console.log("BOB", callbackUI); console.log("UNCLE",callbackImg); return + const quantileNorm = modelEntry.enableQuantileNorm + if (quantileNorm) { + // Quantile normalize function needs specific models to be used + console.log('preModel Quantile normalization enabled') + slices_3d = await quantileNormalizeVolumeData(slices_3d) + } else { + // Min Max Nomalize MRI data to be from 0 to 1 + console.log('preModel Min Max normalization enabled') + slices_3d = await minMaxNormalizeVolumeData(slices_3d) + } + + let mask_3d + + if (pipeline1_out == null) { + // preModel is null + + // Check if thresholding the MRI to remove noisy voxels for better cropping is needed. 
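+    // autoThreshold is a fraction (0..1) of the volume's maximum intensity (see applyMriThreshold);
+    // a value of 0 disables thresholding and the volume is simply binarized with greater(0) instead.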
+ const autoThresholdValue = modelEntry.autoThreshold + + if (autoThresholdValue > 0 && autoThresholdValue <= 1) { + // Filtered MRI from noisy voxel below autoThresholdValue + mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue) + } else { + console.log('No valid crop threshold value') + // binarize original image + mask_3d = await slices_3d.greater([0]).asType('bool') + } + } else { + mask_3d = await pipeline1_out.greater([0]).asType('bool') + // -- pipeline1_out.dispose(); + } + + console.log(' mask_3d shape : ', mask_3d.shape) + + const coords = await tf.whereAsync(mask_3d) + // -- Get each voxel coords (x, y, z) + + mask_3d.dispose() + + const coordsArr = coords.arraySync() + + let row_min = slice_height + let row_max = 0 + let col_min = slice_width + let col_max = 0 + let depth_min = num_of_slices + let depth_max = 0 + + for (let i = 0; i < coordsArr.length; i++) { + if (row_min > coordsArr[i][0]) { + row_min = coordsArr[i][0] + } else if (row_max < coordsArr[i][0]) { + row_max = coordsArr[i][0] + } + + if (col_min > coordsArr[i][1]) { + col_min = coordsArr[i][1] + } else if (col_max < coordsArr[i][1]) { + col_max = coordsArr[i][1] + } + + if (depth_min > coordsArr[i][2]) { + depth_min = coordsArr[i][2] + } else if (depth_max < coordsArr[i][2]) { + depth_max = coordsArr[i][2] + } + } + + console.log('row min and max :', row_min, row_max) + console.log('col min and max :', col_min, col_max) + console.log('depth min and max :', depth_min, depth_max) + + // -- Reference voxel that cropped volume started slice with it + const refVoxel = [row_min, col_min, depth_min] + // -- Starting form refVoxel, size of bounding volume + const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + + coords.dispose() + + // -- Extract 3d object (e.g. 
brain) + const cropped_slices_3d = await slices_3d.slice( + [row_min, col_min, depth_min], + [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + ) + slices_3d.dispose() + + // -- Padding size add to cropped brain + const pad = modelEntry.cropPadding + + // Create margin around the bounding volume + let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad]) + console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape) + + cropped_slices_3d.dispose() + + if (opts.drawBoundingVolume) { + let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', testVol.shape) + + testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr) + console.log(' outLabelVolume final shape after resizing : ', testVol.shape) + + draw3dObjBoundingVolume(tf.unstack(testVol)) + testVol.dispose() + + return 0 + } + + statData.Brainchop_Ver = 'FullVolume' + // model.then(function (res) { + // console.log("--->>>>", opts.drawBoundingVolume); return + const res = await model + try { + let startTime = performance.now() + const inferenceStartTime = performance.now() + // maxLabelPredicted in whole volume of the brain + let maxLabelPredicted = 0 + const transpose = modelEntry.enableTranspose + const delay = modelEntry.inferenceDelay + console.log('Inference delay :', delay) + + if (transpose) { + cropped_slices_3d_w_pad = await cropped_slices_3d_w_pad.transpose() + console.log('Input transposed for pre-model') + } else { + console.log('Transpose not enabled for pre-model') + } + + let i = 1 + const layersLength = res.layers.length + console.log('res.layers.length ', layersLength) + + const isChannelLast = isModelChnlLast(res) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + let adjusted_input_shape + // -- Adjust model input shape + if (isChannelLast) { + res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + res.layers[0].batchInputShape[1], + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + numOfChan + ] + } else { + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + numOfChan, + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + res.layers[0].batchInputShape[4] + ] + } + + console.log(' Model batch input shape : ', res.layers[0].batchInputShape) + // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W] + + statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape) + statData.Output_Shape = JSON.stringify(res.output.shape) + statData.Channel_Last = isChannelLast + statData.Model_Param = await getModelNumParameters(res) + statData.Model_Layers = await getModelNumLayers(res) + statData.Model = modelEntry.modelName + statData.Seq_Conv = modelEntry.enableSeqConv + statData.Extra_Info = null + + // Determine the number of output channels in the last layer of the model + // e.g. 
3, 50, 104 + const outputLayer = res.layers[res.layers.length - 1] + console.log('Output Layer : ', outputLayer) + + const expected_Num_labels = isChannelLast + ? outputLayer.outputShape[outputLayer.outputShape.length - 1] + : outputLayer.outputShape[1] + console.log('Num of output channels : ', expected_Num_labels) + + const curTensor = [] + curTensor[0] = await cropped_slices_3d_w_pad.reshape(adjusted_input_shape) + // console.log("curTensor[0] :", curTensor[0].dataSync()); + + // let curProgBar = parseInt(document.getElementById("progressBar").style.width); + + const timer = window.setInterval(async function () { + try { + if (res.layers[i].activation.getClassName() !== 'linear') { + curTensor[i] = await res.layers[i].apply(curTensor[i - 1]) + } else { + curTensor[i] = await convByOutputChannelAndInputSlicing( + curTensor[i - 1], + res.layers[i].getWeights()[0], + res.layers[i].getWeights()[1], + res.layers[i].strides, + res.layers[i].padding, + res.layers[i].dilationRate, + 3 + ) // important for memory use + } + tf.dispose(curTensor[i - 1]) + } catch (err) { + const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. ' + err.message + callbackUI(errTxt, -1, errTxt) + + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err.message + statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply' + + callbackUI('', -1, '', statData) + + return 0 + } + + console.log('layer output Tensor shape : ', curTensor[i].shape) + console.log('layer count params ', res.layers[i].countParams()) + + res.layers[i].dispose() + curTensor[i - 1].dispose() + + // bork + callbackUI('Layer ' + i.toString(), (i + 1) / layersLength) + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + if (i === layersLength - 2) { + // Stop before the last layer or classification layer. + + window.clearInterval(timer) + + // // Create an instance of SequentialConvLayer + // The second parameter is important for memory, + // the larger it is, the more memory it uses + // it was 8, but I set it to 3, got a different error + // let seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast); + const seqConvLayer = await new SequentialConvLayer(res, 10, isChannelLast, callbackUI) + + // Apply the last output tensor to the seq. instance + let outputTensor = await seqConvLayer.apply(curTensor[i]) + + // -- document.getElementById("progressBarChild").style.width = 0 + "%";; + + // Dispose the previous layer input tensor + tf.dispose(curTensor[i]) + // delete the used class + // ? 
delete seqConvLayer; + + // You can now use 'outputTensor' as needed + console.log(' Output tensor', outputTensor) + console.log(' Output tensor shape : ', outputTensor.shape) + // Array(3) [ 256, 256, 256 ] + + if (outputTensor.shape.length !== 3) { + const msg = 'Output tensor shape should be 3 dims but it is ' + outputTensor.shape.length + callbackUI(msg, -1, msg) + } + + const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4) + + console.log(' find array max ') + const curBatchMaxLabel = await outputTensor.max().dataSync()[0] + if (maxLabelPredicted < curBatchMaxLabel) { + maxLabelPredicted = curBatchMaxLabel + } + + const numSegClasses = maxLabelPredicted + 1 + console.log('Predicted num of segmentation classes', numSegClasses) + statData.Actual_Labels = numSegClasses + statData.Expect_Labels = expected_Num_labels + statData.NumLabels_Match = numSegClasses === expected_Num_labels + if (numSegClasses !== expected_Num_labels) { + const msg = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses + callbackUI(msg, -1, msg) + } + + // -- Transpose back to fit Papaya display settings + let outLabelVolume = outputTensor.reshape([ + cropped_slices_3d_w_pad.shape[0], + cropped_slices_3d_w_pad.shape[1], + cropped_slices_3d_w_pad.shape[2] + ]) + tf.dispose(outputTensor) + + // Transpose MRI data to be match pytorch/keras input output + if (transpose) { + console.log('outLabelVolume transposed') + outLabelVolume = outLabelVolume.transpose() + } + + outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape) + outLabelVolume = await resizeWithZeroPadding( + outLabelVolume, + num_of_slices, + slice_height, + slice_width, + refVoxel, + boundVolSizeArr + ) + console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape) + + // let filterOutWithPreMask = inferenceModelsList[$$("selectModel").getValue() - 1]["filterOutWithPreMask"]; + const filterOutWithPreMask = modelEntry.filterOutWithPreMask + // To clean the skull area wrongly segmented inphase-2. 
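+          // Multiplying by the binarized phase-1 output zeroes any labels that fall outside the pre-model brain mask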
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) { + const bin = await binarizeVolumeDataTensor(pipeline1_out) + outLabelVolume = await outLabelVolume.mul(bin) + } + + startTime = performance.now() + // Generate output volume or slices + console.log('Generating correct output') + let outimg + try { + const img = await new Uint32Array(outLabelVolume.dataSync()) + const Vshape = outLabelVolume.shape + const Vtype = outLabelVolume.dtype + outimg = await generateOutputSlicesV2( + img, + Vshape, + Vtype, + num_of_slices, + numSegClasses, + slice_height, + slice_width, + modelEntry, + opts, + niftiImage + ) + console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors) + + tf.dispose(outLabelVolume) + tf.engine().endScope() + tf.engine().disposeVariables() + } catch (error) { + // -- Timing data to collect + tf.engine().endScope() + tf.engine().disposeVariables() + console.log('Error while generating output: ', error) + const msg = 'Failed while generating output due to limited browser memory available' + callbackUI(msg, -1, msg) + + statData.Inference_t = Inference_t + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = error.message + statData.Extra_Err_Info = 'Failed while generating output' + + callbackUI('', -1, '', statData) + + return 0 + } + const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4) + + console.log( + 'Processing the whole brain volume in tfjs for multi-class output mask took : ', + ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds' + ) + + // -- Timing data to collect + statData.Inference_t = Inference_t + statData.Postprocess_t = Postprocess_t + statData.Status = 'OK' + + callbackUI('', -1, '', statData) + callbackUI('Segmentation finished', 0) + callbackImg(outimg, opts, modelEntry) + return 0 + } else { + i++ + } + }, delay) + } catch (err) { + callbackUI(err.message, -1, err.message) + console.log( + 'If webgl context is lost, try to restore webgl context by visit the link ' + + 'here' + ) + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + } + // }); +} + +async function inferenceFullVolumePhase2( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + pipeline1_out, + modelEntry, + statData, + opts, + callbackImg, + callbackUI, + niftiImage +) { + let outimg = [] + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + console.log(' ---- Start FullVolume inference phase-II ---- ') + const quantileNorm = modelEntry.enableQuantileNorm + if (quantileNorm) { + // Quantile normalize function needs specific models to be used + console.log('preModel Quantile normalization enabled') + slices_3d = await quantileNormalizeVolumeData(slices_3d) + } else { + // Min Max Nomalize MRI data to be from 0 to 1 + console.log('preModel Min Max normalization enabled') + slices_3d = await minMaxNormalizeVolumeData(slices_3d) + } + let mask_3d + if (pipeline1_out == null) { + // preModel is null + + // Check if thresholding the MRI to remove noisy voxels for better cropping is needed. 
+ const autoThresholdValue = modelEntry.autoThreshold + + if (autoThresholdValue > 0 && autoThresholdValue <= 1) { + // Filtered MRI from noisy voxel below autoThresholdValue + mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue) + } else { + console.log('No valid crop threshold value') + // binarize original image + mask_3d = await slices_3d.greater([0]).asType('bool') + } + } else { + mask_3d = pipeline1_out.greater([0]).asType('bool') + // -- pipeline1_out.dispose() + } + console.log(' mask_3d shape : ', mask_3d.shape) + const coords = await tf.whereAsync(mask_3d) + // -- Get each voxel coords (x, y, z) + mask_3d.dispose() + const coordsArr = coords.arraySync() + + let row_min = slice_height + let row_max = 0 + let col_min = slice_width + let col_max = 0 + let depth_min = num_of_slices + let depth_max = 0 + + for (let i = 0; i < coordsArr.length; i++) { + if (row_min > coordsArr[i][0]) { + row_min = coordsArr[i][0] + } else if (row_max < coordsArr[i][0]) { + row_max = coordsArr[i][0] + } + + if (col_min > coordsArr[i][1]) { + col_min = coordsArr[i][1] + } else if (col_max < coordsArr[i][1]) { + col_max = coordsArr[i][1] + } + + if (depth_min > coordsArr[i][2]) { + depth_min = coordsArr[i][2] + } else if (depth_max < coordsArr[i][2]) { + depth_max = coordsArr[i][2] + } + } + + console.log('row min and max :', row_min, row_max) + console.log('col min and max :', col_min, col_max) + console.log('depth min and max :', depth_min, depth_max) + + // -- Reference voxel that cropped volume started slice with it + const refVoxel = [row_min, col_min, depth_min] + console.log('refVoxel :', refVoxel) + + // -- Starting form refVoxel, size of bounding volume + const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + + console.log('boundVolSizeArr :', boundVolSizeArr) + + coords.dispose() + + // -- Extract 3d object (e.g. brain) + const cropped_slices_3d = slices_3d.slice( + [row_min, col_min, depth_min], + [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + ) + + slices_3d.dispose() + + // -- Padding size add to cropped brain + const pad = modelEntry.cropPadding + + // Create margin around the bounding volume + let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad]) + console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape) + + cropped_slices_3d.dispose() + + // -- Test dim after padding .. 
+ // for (let i = 0; i < cropped_slices_3d_w_pad.rank; i++) { + // if(cropped_slices_3d_w_pad.shape[i] > 256) { + // console.log(" cropped_slices_3d_w_pad > 256 ") + // } + + // } + + if (opts.drawBoundingVolume) { + let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', testVol.shape) + + testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr) + console.log(' outLabelVolume final shape after resizing : ', testVol.shape) + // todo draw3dObjBoundingVolume() + draw3dObjBoundingVolume(tf.unstack(testVol)) + testVol.dispose() + + return 0 + } + + statData.Brainchop_Ver = 'FullVolume' + let startTime = performance.now() + let adjusted_input_shape = [] + const res = await model + try { + startTime = performance.now() + const inferenceStartTime = performance.now() + // maxLabelPredicted in whole volume of the brain + let maxLabelPredicted = 0 + const transpose = modelEntry.enableTranspose + const delay = modelEntry.inferenceDelay + console.log('Inference delay :', delay) + + if (transpose) { + cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose() + console.log('Input transposed for pre-model') + } else { + console.log('Transpose not enabled for pre-model') + } + + let i = 1 + const layersLength = res.layers.length + console.log('res.layers.length ', layersLength) + + const isChannelLast = await isModelChnlLast(res) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + + // -- Adjust model input shape + if (isChannelLast) { + res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + res.layers[0].batchInputShape[1], + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + numOfChan + ] + } else { + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + numOfChan, + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + res.layers[0].batchInputShape[4] + ] + } + + console.log(' Model batch input shape : ', res.layers[0].batchInputShape) + // -- batchInputShape {Array} input_shape - e.g. 
[?, D, H, W, Ch] or [?, Ch, D, H, W] + + statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape) + statData.Output_Shape = JSON.stringify(res.output.shape) + statData.Channel_Last = isChannelLast + statData.Model_Param = await getModelNumParameters(res) + statData.Model_Layers = await getModelNumLayers(res) + statData.Model = modelEntry.modelName + statData.Extra_Info = null + + const curTensor = [] + curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape) + const timer = window.setInterval(async function () { + try { + curTensor[i] = res.layers[i].apply(curTensor[i - 1]) + } catch (err) { + callbackUI(err.message, -1, err.message) + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err.message + statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply' + + callbackUI('', -1, '', statData) + + return 0 + } + callbackUI('Layer ' + i.toString(), (i + 1) / layersLength) + console.log('layer output Tensor shape : ', curTensor[i].shape) + console.log('layer count params ', res.layers[i].countParams()) + res.layers[i].dispose() + curTensor[i - 1].dispose() + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + if (i === layersLength - 1) { + window.clearInterval(timer) + + const axis = isChannelLast ? -1 : 1 + console.log(' find argmax ') + console.log('last Tensor shape : ', curTensor[i].shape) + // -- curTensor[i].shape e.g. [ 1, 256, 256, 256, 3 ] + const expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1] + let prediction_argmax + + // Try for argMax with model output tensor. + + try { + const argMaxTime = performance.now() + console.log(' Try tf.argMax for fullVolume ..') + prediction_argmax = tf.argMax(curTensor[i], axis) + console.log('tf.argMax for fullVolume takes : ', ((performance.now() - argMaxTime) / 1000).toFixed(4)) + } catch (err1) { + // if channel last + if (axis === -1) { + try { + const argMaxLargeTime = performance.now() + console.log(' tf.argMax failed .. try argMaxLarge ..') + // todo tensor2LightBuffer() + const modelOutBuffer = tensor2LightBuffer( + curTensor[i].reshape([ + cropped_slices_3d_w_pad.shape[0], + cropped_slices_3d_w_pad.shape[1], + cropped_slices_3d_w_pad.shape[2], + expected_Num_labels + ]), + 'float16' + ) + // todo argMaxLarge() + prediction_argmax = argMaxLarge( + modelOutBuffer, + cropped_slices_3d_w_pad.shape[0], + cropped_slices_3d_w_pad.shape[1], + cropped_slices_3d_w_pad.shape[2], + expected_Num_labels, + 'float16' + ) + console.log( + 'argMaxLarge for fullVolume takes : ', + ((performance.now() - argMaxLargeTime) / 1000).toFixed(4) + ) + } catch (err2) { + const errTxt = "argMax buffer couldn't be created due to limited memory resources." + callbackUI(errTxt, -1, errTxt) + + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err2.message + statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge failed' + + callbackUI('', -1, '', statData) + + return 0 + } + } else { + // if channel first .. + const errTxt = "argMax buffer couldn't be created due to limited memory resources." 
+ callbackUI(errTxt, -1, errTxt) + + prediction_argmax.dispose() + + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err1.message + statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge not support yet channel first' + + callbackUI('', -1, '', statData) + + return 0 + } + } + + console.log(' prediction_argmax shape : ', prediction_argmax.shape) + // -- prediction_argmax.shape : [ 1, 256, 256, 256] + + const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4) + + // outputDataBeforArgmx = Array.from(prediction_argmax.dataSync()) + tf.dispose(curTensor[i]) + // allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) }) + const curBatchMaxLabel = await prediction_argmax.max().dataSync()[0] + if (maxLabelPredicted < curBatchMaxLabel) { + maxLabelPredicted = curBatchMaxLabel + } + + const numSegClasses = maxLabelPredicted + 1 + console.log('numSegClasses', numSegClasses) + statData.Actual_Labels = numSegClasses + statData.Expect_Labels = expected_Num_labels + statData.NumLabels_Match = numSegClasses === expected_Num_labels + + if (numSegClasses !== expected_Num_labels) { + // errTxt = "expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses + ". For possible solutions please refer to FAQ .", "alert-error" + const errTxt = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses + callbackUI(errTxt, -1, errTxt) + } + + // -- Transpose back to fit Papaya display settings + let outLabelVolume = prediction_argmax.reshape([ + cropped_slices_3d_w_pad.shape[0], + cropped_slices_3d_w_pad.shape[1], + cropped_slices_3d_w_pad.shape[2] + ]) + tf.dispose(prediction_argmax) + + // Transpose MRI data to be match pytorch/keras input output + if (transpose) { + console.log('outLabelVolume transposed') + outLabelVolume = outLabelVolume.transpose() + } + outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape) + outLabelVolume = await resizeWithZeroPadding( + outLabelVolume, + num_of_slices, + slice_height, + slice_width, + refVoxel, + boundVolSizeArr + ) + console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape) + + const filterOutWithPreMask = modelEntry.filterOutWithPreMask + // To clean the skull area wrongly segmented in phase-2. 
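+          // Remove stray labels outside the binarized phase-1 brain mask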
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) { + const bin = binarizeVolumeDataTensor(pipeline1_out) + outLabelVolume = outLabelVolume.mul(bin) + } + + startTime = performance.now() + // Generate output volume or slices + console.log('Generating correct output') + + try { + const img = new Uint32Array(outLabelVolume.dataSync()) + const Vshape = outLabelVolume.shape + const Vtype = outLabelVolume.dtype + tf.dispose(outLabelVolume) + tf.engine().endScope() + tf.engine().disposeVariables() + outimg = await generateOutputSlicesV2( + img, + Vshape, + Vtype, + num_of_slices, + numSegClasses, + slice_height, + slice_width, + modelEntry, + opts, + niftiImage + ) + console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors) + } catch (error) { + // -- Timing data to collect + tf.engine().endScope() + tf.engine().disposeVariables() + + const errTxt = 'Failed while generating output due to limited browser memory available' + callbackUI(errTxt, -1, errTxt) + statData.Inference_t = Inference_t + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = error.message + statData.Extra_Err_Info = 'Failed while generating output' + + callbackUI('', -1, '', statData) + + return 0 + } + + const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4) + + // tf.engine().endScope() + tf.engine().disposeVariables() + + console.log( + 'Processing the whole brain volume in tfjs for multi-class output mask took : ', + ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds' + ) + + // -- Timing data to collect + statData.Inference_t = Inference_t + statData.Postprocess_t = Postprocess_t + statData.Status = 'OK' + + callbackUI('', -1, '', statData) + clearInterval(timer) + callbackUI('Segmentation finished', 0) + callbackImg(outimg, opts, modelEntry) + return 0 + } + i++ + }, delay) + } catch (err) { + callbackUI(err.message, -1, err.message) + console.log( + 'If webgl context is lost, try to restore webgl context by visit the link ' + + 'here' + ) + } + // }) +} + +async function inferenceFullVolumePhase1( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + isModelFullVol, + modelEntry, + statData, + opts, + callbackImg, + callbackUI, + niftiImage +) { + statData.No_SubVolumes = 1 + // load pre-model for inference first, can be null if no pre-model such as GWM models + if (modelEntry.preModelId) { + const preModel = await load_model(opts.rootURL + inferenceModelsList[modelEntry.preModelId - 1].path) + const transpose = inferenceModelsList[modelEntry.preModelId - 1].enableTranspose + const quantileNorm = inferenceModelsList[modelEntry.preModelId - 1].enableQuantileNorm + let preModel_slices_3d = null + + // -- If pre-model is not null then slices_3d mask will be generated.. + // -- The mask is needed to remove the skull and set noise in background to 0, and get the brain bounding volume properly + const slices_3d_mask = null + + if (quantileNorm) { + // Quantile normalize function needs specific models to be used + console.log('preModel Quantile normalization enabled') + preModel_slices_3d = await quantileNormalizeVolumeData(slices_3d) + } else { + // Min Max Nomalize MRI data to be from 0 to 1 + console.log('preModel Min Max normalization enabled') + preModel_slices_3d = await minMaxNormalizeVolumeData(slices_3d) + } + + // -- Transpose MRI data to be match pytorch/keras input output + // -- Check if pre-model needs transpose.. 
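+    // enableTranspose swaps the volume axes so the input matches the axis order the pre-model was exported with (PyTorch/Keras convention)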
+ if (transpose) { + preModel_slices_3d = await preModel_slices_3d.transpose() + console.log('Input transposed for pre-model') + } else { + console.log('Transpose not enabled for pre-model') + } + + statData.Brainchop_Ver = 'PreModel_FV' // e.g. "PreModel_FV" + + // preModel.then(function (res) { + const res = await preModel + + try { + const inferenceStartTime = performance.now() + const preModelObject = res + + // read input shape from model.json object + const preModelBatchInputShape = preModelObject.layers[0].batchInputShape + console.log(' Pre-Model batch input shape : ', preModelBatchInputShape) + + // -- Verify input shape + if (preModelBatchInputShape.length !== 5) { + const errTxt = 'The pre-model input shape must be 5D ' + callbackUI(errTxt, -1, errTxt) + return 0 + } + + const isPreModelChannelLast = isModelChnlLast(preModelObject) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + let batch_D, batch_H, batch_W + let preModel_input_shape + if (isPreModelChannelLast) { + console.log('Pre-Model Channel Last') + if (isNaN(preModelBatchInputShape[4]) || preModelBatchInputShape[4] !== 1) { + const errTxt = 'The number of channels for pre-model input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + + batch_D = preModelBatchInputShape[1] + batch_H = preModelBatchInputShape[2] + batch_W = preModelBatchInputShape[3] + + preModel_input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan] + } else { + console.log('Pre-Model Channel First') + if (isNaN(preModelBatchInputShape[1]) || preModelBatchInputShape[1] !== 1) { + const errTxt = 'The number of channels for pre-model input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + + batch_D = preModelBatchInputShape[2] + batch_H = preModelBatchInputShape[3] + batch_W = preModelBatchInputShape[4] + + preModel_input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W] + } + + statData.Input_Shape = JSON.stringify(preModel_input_shape) + statData.Output_Shape = JSON.stringify(preModelObject.output.shape) + statData.Channel_Last = isPreModelChannelLast + statData.Model_Param = await getModelNumParameters(preModelObject) + statData.Model_Layers = await getModelNumLayers(preModelObject) + + // maxLabelPredicted in whole volume of the brain + let maxLabelPredicted = 0 + const delay = inferenceModelsList[modelEntry.preModelId - 1].inferenceDelay + + let i = 1 + const layersLength = res.layers.length + + const curTensor = [] + // -- reshape MRI to model input shape + curTensor[0] = preModel_slices_3d.reshape(preModel_input_shape) + + // Dispose the volume + tf.dispose(preModel_slices_3d) + + const timer = window.setInterval(async function () { + try { + curTensor[i] = res.layers[i].apply(curTensor[i - 1]) + } catch (err) { + const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. 
' + err.message + callbackUI(errTxt, -1, errTxt) + + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err.message + statData.Extra_Err_Info = 'PreModel Failed while model layer ' + i + ' apply' + + callbackUI('', -1, '', statData) + + return 0 + } + + res.layers[i].dispose() + curTensor[i - 1].dispose() + + callbackUI('Layer ' + i.toString(), (i + 1) / layersLength) + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + + if (i === layersLength - 1) { + window.clearInterval(timer) + + // -- prediction = res.layers[res.layers.length-1].apply(curTensor[i]) + // -- curTensor[i].print() + // -- outputDataBeforArgmx = Array.from(curTensor[i].dataSync()) + + const axis = isPreModelChannelLast ? -1 : 1 + console.log(' find argmax ') + console.log('last Tensor shape : ', curTensor[i].shape) + // -- curTensor[i].shape : [ 1, 256, 256, 256, 3 ] + const expected_Num_labels = isPreModelChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1] + let prediction_argmax + + // Try for argMax with model output tensor. + + try { + console.log(' Try tf.argMax for fullVolume ..') + prediction_argmax = await tf.argMax(curTensor[i], axis) + } catch (err1) { + // if channel last + if (axis === -1) { + try { + const argMaxLargeTime = performance.now() + console.log(' tf.argMax failed .. try argMaxLarge ..') + const modelOutBuffer = tensor2LightBuffer( + curTensor[i].reshape([num_of_slices, slice_height, slice_width, expected_Num_labels]), + 'float16' + ) + prediction_argmax = argMaxLarge( + modelOutBuffer, + num_of_slices, + slice_height, + slice_width, + expected_Num_labels, + 'float16' + ) + console.log( + 'argMaxLarge for fullVolume takes : ', + ((performance.now() - argMaxLargeTime) / 1000).toFixed(4) + ) + } catch (err2) { + const errTxt = "argMax buffer couldn't be created due to limited memory resources." + callbackUI(errTxt, -1, errTxt) + + prediction_argmax.dispose() + + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err2.message + statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge failed' + + callbackUI('', -1, '', statData) + + return 0 + } + } else { + // if channel first .. + const errTxt = "argMax buffer couldn't be created due to limited memory resources." 
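+              // -- argMaxLarge currently supports only channel-last outputs, so a channel-first
+              // -- model cannot fall back to it and inference is aborted here.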
+ callbackUI(errTxt, -1, errTxt) + + prediction_argmax.dispose() + + window.clearInterval(timer) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err1.message + statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge not support yet channel first' + + callbackUI('', -1, '', statData) + + return 0 + } + } + + console.log(' Pre-model prediction_argmax shape : ', prediction_argmax.shape) + // -- prediction_argmax.shape : [ 1, 256, 256, 256] + + const Inference_t = ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + + tf.dispose(curTensor[i]) + + console.log(' Pre-model find array max ') + const curBatchMaxLabel = await prediction_argmax.max().dataSync()[0] + if (maxLabelPredicted < curBatchMaxLabel) { + maxLabelPredicted = curBatchMaxLabel + } + + const numSegClasses = maxLabelPredicted + 1 + console.log('Pre-model numSegClasses', numSegClasses) + + statData.Actual_Labels = numSegClasses + statData.Expect_Labels = expected_Num_labels + statData.NumLabels_Match = numSegClasses === expected_Num_labels + + // -- Transpose back to fit Papaya display settings + let outLabelVolume = await prediction_argmax.reshape([num_of_slices, slice_height, slice_width]) + tf.dispose(prediction_argmax) + // Transpose MRI data to be match pytorch/keras input output + if (transpose) { + console.log('Pre-model outLabelVolume transposed') + outLabelVolume = outLabelVolume.transpose() + } + const startTime = performance.now() + // Generate output volume or slices + console.log('Generating pre-model output') + let slices_3d_mask + try { + const unstackOutVolumeTensor = await tf.unstack(outLabelVolume) + slices_3d_mask = await generateBrainMask( + unstackOutVolumeTensor, + num_of_slices, + slice_height, + slice_width, + modelEntry, + opts, + callbackUI, + callbackImg, + false + ) + await tf.dispose(outLabelVolume) + console.log(' Phase-1 num of tensors after generateBrainMask: ', tf.memory().numTensors) + } catch (error) { + // -- Timing data to collect + tf.engine().endScope() + tf.engine().disposeVariables() + + const errTxt = 'Failed while generating pre-model output due to limited browser memory available' + callbackUI(errTxt, -1, errTxt) + + statData.Inference_t = Inference_t + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = error.message + statData.Extra_Err_Info = 'Pre-model failed while generating output' + + callbackUI('', -1, '', statData) + + return 0 + } + const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4) + console.log( + 'Pre-model processing the whole brain volume in tfjs tooks for multi-class output mask : ', + ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds' + ) + + // -- Timing data to collect + statData.Inference_t = Inference_t + statData.Postprocess_t = Postprocess_t + statData.Status = 'OK' + + callbackUI('', -1, '', statData) + + if (slices_3d_mask == null) { + const msg = 'slice_3d_mask failed ...' + callbackUI(msg, -1, msg) + return 0 + } else { + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + console.log('--- pre-model done ---') + // --mask_3d = slices_3d_mask.greater([0]).asType('bool') + // --slices_3d_mask.dispose() + + if (isModelFullVol) { + if (modelEntry.enableSeqConv) { + // Mask cropping & seq conv + // Non-Atlas model (e.g. GWM) needs sequential convolution layer. 
+ // Sequential convolution layer to be used after cropping - slow but reliable on most machines + console.log('------ Mask Cropping & Seq Convoluton ------') + await inferenceFullVolumeSeqCovLayerPhase2( + opts, + modelEntry, + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + slices_3d_mask, + callbackUI, + callbackImg, + statData, + niftiImage + ) + return 0 + // inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask) + } else { + // Mask cropping BUT no seq conv + console.log('------ Mask Cropping - NO Seq Convoluton ------') + await inferenceFullVolumePhase2( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + slices_3d_mask, + modelEntry, + statData, + opts, + callbackImg, + callbackUI, + niftiImage + ) + // inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask) + } + } else { + // -- In version 3.0.0 this function not used + await inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, slices_3d_mask) + // inferenceSubVolumes(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask) + } + } + } + i++ + }, delay) + } catch (err) { + callbackUI(err.message, -1, err.message) + console.log( + 'If webgl context is lost, try to restore webgl context by visit the link ' + + 'here' + ) + + // document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green" + + // document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green" + } + // }) + + // -- if(...) end + } else { + // No preModel + + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + console.log('--- No pre-model is selected ---') + console.log('------ Run voxel cropping ------') + // -- mask_3d = slices_3d.greater([0]).asType('bool') + + if (isModelFullVol) { + if (modelEntry.enableSeqConv) { + // Voxel cropping & seq conv + // Non-Atlas model (e.g. GWM) needs sequential convolution layer. + // Sequential convolution layer to be used after cropping - slow but reliable on most machines + console.log('------ Seq Convoluton ------') + await inferenceFullVolumeSeqCovLayerPhase2( + opts, + modelEntry, + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + null, + callbackUI, + callbackImg, + statData, + niftiImage + ) + } else { + // Voxel cropping BUT no seq conv + // todo: we do not use result const outimg = await + inferenceFullVolumePhase2( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + null, + modelEntry, + statData, + opts, + callbackImg, + callbackUI, + niftiImage + ) + } + } else { + // -- In version 3.0.0 this function not used + inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, null) + } + } +} + +async function enableProductionMode(textureF16Flag = true) { + // -- tf.setBackend('cpu') + // -- tf.removeBackend('cpu') + // -- Calling enableProdMode() method + await tf.enableProdMode() + // -- Setting debug mode of the environment + tf.env().set('DEBUG', false) + tf.env().set('WEBGL_FORCE_F16_TEXTURES', textureF16Flag) + // -- set this flag so that textures are deleted when tensors are disposed. 
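+  // -- (A threshold of 0 should delete WebGL textures as soon as their tensors are disposed rather
+  // -- than caching them for reuse, lowering GPU memory use at some re-allocation cost.)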
+ tf.env().set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0) + // -- tf.env().set('WEBGL_PACK', false) + // -- Put ready after sets above + await tf.ready() + // -- Printing output + console.log('tf env() flags :', tf.env().flags) + console.log('tf env() features :', tf.env().features) + console.log('tf env total features: ', Object.keys(tf.env().features).length) + console.log(tf.getBackend()) +} + +async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI) { + const statData = [] + statData.startTime = Date.now() // for common webworker/mainthread do not use performance.now() + callbackUI('Segmentation started', 0) + const startTime = performance.now() + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + if (isNaN(batchSize) || batchSize !== 1) { + const errTxt = 'The batch Size for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + if (isNaN(numOfChan) || numOfChan !== 1) { + const errTxt = 'The number of channels for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + tf.engine().startScope() + console.log('Batch size: ', batchSize) + console.log('Num of Channels: ', numOfChan) + const model = await load_model(opts.rootURL + modelEntry.path) + await enableProductionMode(true) + statData.TF_Backend = tf.getBackend() + const modelObject = model + let batchInputShape = [] + // free global variable of 16777216 voxel + // allOutputSlices3DCC1DimArray = [] + // outputSceneRendered = false + // read input shape from model.json object + batchInputShape = modelObject.layers[0].batchInputShape + console.log(' Model batch input shape : ', batchInputShape) + // -- Verify input shape + if (batchInputShape.length !== 5) { + const errTxt = 'The model input shape must be 5D' + callbackUI(errTxt, -1, errTxt) + return 0 + } + let batch_D, batch_H, batch_W + let input_shape + const slice_width = niftiHeader.dims[1] + const slice_height = niftiHeader.dims[2] + const num_of_slices = niftiHeader.dims[3] + const isChannelLast = await isModelChnlLast(modelObject) + if (isChannelLast) { + console.log('Model Channel Last') + if (isNaN(batchInputShape[4]) || batchInputShape[4] !== 1) { + const errTxt = 'The number of channels for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + batch_D = batchInputShape[1] + batch_H = batchInputShape[2] + batch_W = batchInputShape[3] + + input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan] + } else { + console.log('Model Channel First') + if (isNaN(batchInputShape[1]) || batchInputShape[1] !== 1) { + const errTxt = 'The number of channels for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + batch_D = batchInputShape[2] + batch_H = batchInputShape[3] + batch_W = batchInputShape[4] + input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W] + } + // //-- Atlas version check + // if ( (batch_D > 30) && (batch_H == 256) && (batch_W == 256) ) { + // const errTxt = "The subvolume dimension in z-axis shouldn't exceed 30 number of slices for browser limitation" + // callbackUI(errTxt, -1, errTxt) + // return 0 + // } + // --Check whether the model will make inference at once as FullVolumeModel + let isModelFullVol + if (batch_D === 256 && batch_H === 256 && batch_W === 256) { + isModelFullVol = true + } else { + isModelFullVol = false + } + statData.isModelFullVol = isModelFullVol + // Model output number of segmentations + let allSlices = await getAllSlicesData1D(num_of_slices, niftiHeader, niftiImage) + const allSlices_2D = await 
getAllSlices2D(allSlices, slice_height, slice_width) + // free array from mem + allSlices = null + // Get slices_3d tensor + let slices_3d = await getSlices3D(allSlices_2D) + // free tensor from mem + tf.dispose(allSlices_2D) + const transpose = modelEntry.enableTranspose + const enableCrop = modelEntry.enableCrop + if (isModelFullVol) { + if (enableCrop) { + // FullVolume with Crop option before inference .. + // pre-model to mask the volume, can also be null and the cropping will be on the MRI. + await inferenceFullVolumePhase1( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + isModelFullVol, + modelEntry, + statData, + opts, + callbackImg, + callbackUI, + niftiImage + ) + } else { + // Transpose MRI data to be match pytorch/keras input output + console.log('Cropping Disabled') + + if (transpose) { + slices_3d = slices_3d.transpose() + console.log('Input transposed') + } else { + console.log('Transpose NOT Enabled') + } + + const enableSeqConv = modelEntry.enableSeqConv + + if (enableSeqConv) { + console.log('Seq Convoluton Enabled') + await inferenceFullVolumeSeqCovLayer( + model, + slices_3d, + input_shape, + isChannelLast, + num_of_slices, + slice_height, + slice_width + ) + } else { + console.log('Seq Convoluton Disabled') + await inferenceFullVolume( + model, + slices_3d, + input_shape, + isChannelLast, + num_of_slices, + slice_height, + slice_width + ) + } + } + } +} diff --git a/brainchop-parameters.js b/brainchop-parameters.js new file mode 100644 index 0000000..cd226e3 --- /dev/null +++ b/brainchop-parameters.js @@ -0,0 +1,410 @@ +export {inferenceModelsList, brainChopOpts } + +const brainChopOpts = { + // General settings for input shape [batchSize, batch_D, batch_H, batch_W, numOfChan] + batchSize: 1, // How many batches are used during each inference iteration + numOfChan: 1, // num of channel of the input shape + isColorEnable: true, // If false, grey scale will enabled + isAutoColors: true, // If false, manualColorsRange will be in use + bgLabelValue: 0, // Semenatic Segmentation background label value + drawBoundingVolume: false, // plot bounding volume used to crop the brain + isGPU: true, //use WebGL/GPU (faster) or CPU (compatibility) + isBrainCropMaskBased: true, // Check if brain masking will be used for cropping & optional show or brain tissue will be used + showPhase1Output: false, // This will load to papaya the output of phase-1 (ie. brain mask or brain tissue) + isPostProcessEnable: true, // If true 3D Connected Components filter will apply + isContoursViewEnable: false, // If true 3D contours of the labeled regions will apply + browserArrayBufferMaxZDim: 30, // This value depends on Memory available + telemetryFlag: false, // Ethical and transparent collection of browser usage while adhering to security and privacy standards + chartXaxisStepPercent: 10, // percent from total labels on Xaxis + uiSampleName: 'BC_UI_Sample', // Sample name used by interface + atlasSelectedColorTable: 'Fire' // Select from ["Hot-and-Cold", "Fire", "Grayscale", "Gold", "Spectrum"] +} + +// Inference Models, the ids must start from 1 in sequence +const inferenceModelsList = [ + { + id: 1, + type: 'Segmentation', + path: '/models/model5_gw_ae/model.json', + modelName: '\u26A1 Tissue GWM (light)', + labelsPath: './models/model5_gw_ae/labels.json', + colorsPath: './models/model5_gw_ae/colorLUT.json', + colormapPath: './models/model5_gw_ae/colormap3.json', + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. 
} + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 18, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: null, // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Gray and white matter segmentation model. Operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the subvolume model.' + }, + { + id: 2, + type: 'Segmentation', + path: '/models/model20chan3cls/model.json', + modelName: '\u{1F52A} Tissue GWM (High Acc)', + labelsPath: './models/model20chan3cls/labels.json', + colorsPath: './models/model20chan3cls/colorLUT.json', + colormapPath: './models/model20chan3cls/colormap.json', + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0.2, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: true, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. 
Provides the best accuracy with hard cropping for better speed' + }, + { + id: 3, + type: 'Segmentation', + path: '/models/model20chan3cls/model.json', + modelName: '\u{1F52A} Tissue GWM (High Acc, Low Mem)', + labelsPath: './models/model20chan3cls/labels.json', + colorsPath: './models/model20chan3cls/colorLUT.json', + colormapPath: './models/model20chan3cls/colormap.json', + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0.2, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: true, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. Provides high accuracy and fit low memory available but slower' + }, + { + id: 4, + type: 'Atlas', + path: '/models/model30chan18cls/model.json', + modelName: '\u{1FA93} Subcortical + GWM (High Mem, Fast)', + labelsPath: './models/model30chan18cls/labels.json', + colorsPath: './models/model30chan18cls/colorLUT.json', + colormapPath: './models/model30chan18cls/colormap.json', + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0.2, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. 
+ warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary.' + }, + { + id: 5, + type: 'Atlas', + path: '/models/model30chan18cls/model.json', + modelName: '\u{1FA93} Subcortical + GWM (Low Mem, Slow)', + labelsPath: './models/model30chan18cls/labels.json', + colorsPath: './models/model30chan18cls/colorLUT.json', + colormapPath: './models/model30chan18cls/colormap.json', + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0.2, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary.' + }, + { + id: 6, + type: 'Atlas', + path: '/models/model18cls/model.json', + modelName: '\u{1FA93} Subcortical + GWM (Low Mem, Faster)', + labelsPath: './models/model18cls/labels.json', + colorsPath: './models/model18cls/colorLUT.json', + colormapPath: './models/model18cls/colormap.json', + preModelId: null, // model run first e.g. Brain_Extraction { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. 
+ cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0.2, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary.' + }, + { + id: 7, + type: 'Atlas', + path: '/models/model30chan18cls/model.json', + modelName: '\u{1F52A}\u{1FA93} Subcortical + GWM (Failsafe, Less Acc)', + labelsPath: './models/model30chan18cls/labels.json', + colorsPath: './models/model30chan18cls/colorLUT.json', + colormapPath: './models/model30chan18cls/colormap.json', + preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model, it may work on low data quality, including varying saturation, and even clinical scans. It may work also on infant brains, but your mileage may vary.' + }, + { + id: 8, + type: 'Atlas', + path: '/models/model30chan50cls/model.json', + modelName: '\u{1F52A} Aparc+Aseg 50 (High Mem, Fast)', + labelsPath: './models/model30chan50cls/labels.json', + colorsPath: './models/model30chan50cls/colorLUT.json', + colormapPath: './models/model30chan50cls/colormap.json', + preModelId: 1, // Model run first e.g. 
crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: true, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'This is a 50-class model, that segments the brain into the Aparc+Aseg Freesurfer Atlas but one where cortical homologues are merged into a single class.' + }, + { + id: 9, + type: 'Atlas', + path: '/models/model30chan50cls/model.json', + modelName: '\u{1F52A} Aparc+Aseg 50 (Low Mem, Slow)', + labelsPath: './models/model30chan50cls/labels.json', + colorsPath: './models/model30chan50cls/colorLUT.json', + colormapPath: './models/model30chan50cls/colormap.json', + preModelId: 1, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: true, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last laye + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'This is a 50-class model, that segments the brain into the Aparc+Aseg Freesurfer Atlas but one where cortical homologues are merged into a single class. 
The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time.' + }, + // './models/model5_gw_ae/colorLUT.json', + { + id: 10, + type: 'Brain_Extraction', + path: '/models/model5_gw_ae/model.json', + modelName: '\u26A1 Extract the Brain (FAST)', + labelsPath: null, + colorsPath: null, + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 18, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: null, // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'Extract the brain fast model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version.' + }, + { + id: 11, + type: 'Brain_Extraction', + path: '/models/model11_gw_ae/model.json', + modelName: '\u{1F52A} Extract the Brain (High Acc, Slow)', + labelsPath: null, + colorsPath: null, + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", + inferenceDelay: 100, // Delay in ms time while looping layers applying. 
+ description: + 'Extract the brain high accuracy model operates on full T1 image in a single pass, but uses only 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version.' + }, + { + id: 12, + type: 'Brain_Masking', + path: '/models/model5_gw_ae/model.json', + modelName: '\u26A1 Brain Mask (FAST)', + labelsPath: null, + colorsPath: null, + colormapPath: './models/model5_gw_ae/colormap.json', + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 17, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: null, // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'This fast masking model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than failsafe version.' + }, + { + id: 13, + type: 'Brain_Masking', + path: '/models/model11_gw_ae/model.json', + modelName: '\u{1F52A} Brain Mask (High Acc, Low Mem)', + labelsPath: null, + colorsPath: null, + preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 0, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: true, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. 
For more info please check with Browser Resources .", + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'This masking model operates on full T1 image in a single pass, but uses 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than fast version.' + }, + { + id: 14, + type: 'Atlas', + path: '/models/model21_104class/model.json', + modelName: '\u{1F52A} Aparc+Aseg 104 (High Mem, Fast)', + labelsPath: './models/model21_104class/labels.json', + colorsPath: './models/model21_104class/colorLUT.json', + colormapPath: './models/model21_104class/colormap.json', + preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. + filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions.' + }, + { + id: 15, + type: 'Atlas', + path: '/models/model21_104class/model.json', + modelName: '\u{1F52A} Aparc+Aseg 104 (Low Mem, Slow)', + labelsPath: './models/model21_104class/labels.json', + colorsPath: './models/model21_104class/colorLUT.json', + colormapPath: './models/model21_104class/colormap.json', + preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. } + preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output. + isBatchOverlapEnable: false, // create extra overlap batches for inference + numOverlapBatches: 200, // Number of extra overlap batches for inference + enableTranspose: true, // Keras and tfjs input orientation may need a tranposing step to be matched + enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use. + cropPadding: 0, // Padding size add to cropped brain + autoThreshold: 0, // Threshold between 0 and 1, given no preModel and tensor is normalized either min-max or by quantiles. Will remove noisy voxels around brain + enableQuantileNorm: false, // Some models needs Quantile Normaliztion. 
+ filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas + enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer + textureSize: 0, // Requested Texture size for the model, if unknown can be 0. + warning: + "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model. + inferenceDelay: 100, // Delay in ms time while looping layers applying. + description: + 'FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time. ' + } +] // inferenceModelsList diff --git a/brainchop-telemetry.js b/brainchop-telemetry.js new file mode 100644 index 0000000..f3f1234 --- /dev/null +++ b/brainchop-telemetry.js @@ -0,0 +1,200 @@ +export {isChrome, localSystemDetails } + +async function detectBrowser() { + if (navigator.userAgent.indexOf('OPR/') > -1) { + return 'Opera' + } else if (navigator.userAgent.indexOf('Edg/') > -1) { + return 'Edge' + } else if (navigator.userAgent.indexOf('Falkon/') > -1) { + return 'Falkon' + } else if (navigator.userAgent.indexOf('Chrome/') > -1) { + return 'Chrome' + } else if (navigator.userAgent.indexOf('Firefox/') > -1) { + return 'Firefox' + } else if (navigator.userAgent.indexOf('Safari/') > -1) { + return 'Safari' + } else if (navigator.userAgent.indexOf('MSIE/') > -1 || navigator.userAgent.indexOf('rv:') > -1) { + return 'IExplorer' + } else { + return 'Unknown' + } +} + +async function detectBrowserVersion() { + if (navigator.userAgent.indexOf('OPR/') > -1) { + return parseInt(navigator.userAgent.split('OPR/')[1]) + } else if (navigator.userAgent.indexOf('Edg/') > -1) { + return parseInt(navigator.userAgent.split('Edg/')[1]) + } else if (navigator.userAgent.indexOf('Falkon/') > -1) { + return parseInt(navigator.userAgent.split('Falkon/')[1]) + } else if (navigator.userAgent.indexOf('Chrome/') > -1) { + return parseInt(navigator.userAgent.split('Chrome/')[1]) + } else if (navigator.userAgent.indexOf('Firefox/') > -1) { + return parseInt(navigator.userAgent.split('Firefox/')[1]) + } else if (navigator.userAgent.indexOf('Safari/') > -1) { + return parseInt(navigator.userAgent.split('Safari/')[1]) + } else if (navigator.userAgent.indexOf('MSIE/') > -1 || navigator.userAgent.indexOf('rv:') > -1) { + return parseInt(navigator.userAgent.split('MSIE/')[1]) + } else { + return Infinity + } +} + +async function detectOperatingSys() { + if (navigator.userAgent.indexOf('Win') > -1) { + return 'Windows' + } else if (navigator.userAgent.indexOf('Mac') > -1) { + return 'MacOS' + } else if (navigator.userAgent.indexOf('Linux') > -1) { + return 'Linux' + } else if (navigator.userAgent.indexOf('UNIX') > -1) { + return 'UNIX' + } else { + return 'Unknown' + } +} + +async function checkWebGl2(gl) { + //const gl = document.createElement('canvas').getContext('webgl2') + if (!gl) { + if (typeof WebGL2RenderingContext !== 'undefined') { + const msg = 'WebGL2 may be disabled. 
Please try updating video card drivers' + } else { + console.log('WebGL2 is not supported') + } + return false + } else { + console.log('WebGl2 is enabled') + return true + } +} + +async function detectGPUVendor(gl) { + //const gl = document.createElement('canvas').getContext('webgl') + let debugInfo + if (gl) { + debugInfo = gl.getExtension('WEBGL_debug_renderer_info') + if (debugInfo) { + const result = gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) + // --e.g. : NVIDIA Corporation + if (result.indexOf('(') > -1 && result.indexOf(')') > -1) { + return result.substring(result.indexOf('(') + 1, result.indexOf(')')) + } + return result + } + } + return null +} + +async function detectGPUVendor_v0(gl) { + //const gl = document.createElement('canvas').getContext('webgl') + if (gl) { + const debugInfo = gl.getExtension('WEBGL_debug_renderer_info') + return debugInfo ? gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) : null + } else { + return null + } +} + +async function detectGPUCardType_v0(gl) { + if (gl) { + if (detectBrowser() === 'Firefox') { + // -- return e.g: "GeForce GTX 980/PCIe/SSE2" + return gl.getParameter(gl.RENDERER) + } + + const debugInfo = gl.getExtension('WEBGL_debug_renderer_info') + return debugInfo ? gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL) : null + } else { + return null + } +} + +async function detectGPUCardType(gl) { + let debugInfo + + if (gl) { + if (detectBrowser() === 'Firefox') { + // -- return e.g: "GeForce GTX 980/PCIe/SSE2" + return gl.getParameter(gl.RENDERER) + } + + debugInfo = gl.getExtension('WEBGL_debug_renderer_info') + + if (debugInfo) { + let result = gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL) + // --e.g. : ANGLE (NVIDIA Corporation, GeForce GTX 1050 Ti/PCIe/SSE2, OpenGL 4.5.0 NVIDIA 390.144) as with Chrome + // Or: GeForce GTX 1050 Ti/PCIe/SSE2 as with fireFox + + if (result.indexOf('(') > -1 && result.indexOf(')') > -1 && result.indexOf('(R)') === -1) { + result = result.substring(result.indexOf('(') + 1, result.indexOf(')')) + + if (result.split(',').length === 3) { + return result.split(',')[1].trim() + } + } + + return result + } + } + return null +} + +async function getCPUNumCores() { + return navigator.hardwareConcurrency +} + +async function isChrome() { + return /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor) +} + +async function localSystemDetails(statData, gl = null) { + // -- Timing data to collect + const today = new Date() + if (statData.isModelFullVol) { + statData.Brainchop_Ver = 'FullVolume' + } else { + statData.Brainchop_Ver = 'SubVolumes' + } + + /* let geoData = getBrowserLocationInfo() + if(geoData) { + statData["Country"] = geoData["Country"] + statData["State"] = geoData["Region"] + statData["City"] = geoData["City"] + } else { + statData["Country"] = "" + statData["State"] = "" + statData["City"] = "" + } */ + statData.Total_t = (Date.now() - statData.startTime) / 1000.0 + delete statData.startTime + statData.Date = parseInt(today.getMonth() + 1) + '/' + today.getDate() + '/' + today.getFullYear() + statData.Browser = await detectBrowser() + statData.Browser_Ver = await detectBrowserVersion() + statData.OS = await detectOperatingSys() + statData.WebGL2 = await checkWebGl2(gl) + statData.GPU_Vendor = await detectGPUVendor(gl) + statData.GPU_Card = await detectGPUCardType(gl) + statData.GPU_Vendor_Full = await detectGPUVendor_v0(gl) + statData.GPU_Card_Full = await detectGPUCardType_v0(gl) + statData.CPU_Cores = await getCPUNumCores() + statData.Which_Brainchop = 'latest' + if 
(await isChrome()) { + statData.Heap_Size_MB = window.performance.memory.totalJSHeapSize / (1024 * 1024).toFixed(2) + statData.Used_Heap_MB = window.performance.memory.usedJSHeapSize / (1024 * 1024).toFixed(2) + statData.Heap_Limit_MB = window.performance.memory.jsHeapSizeLimit / (1024 * 1024).toFixed(2) + } + if (gl) { + console.log('MAX_TEXTURE_SIZE :', gl.getParameter(gl.MAX_TEXTURE_SIZE)) + console.log('MAX_RENDERBUFFER_SIZE :', gl.getParameter(gl.MAX_RENDERBUFFER_SIZE)) + // -- check to see if machine has two graphics card: one is the builtin e.g. Intel Iris Pro, the other is NVIDIA GeForce GT 750M. + // -- check browser use which one, if debugInfo is null then installed GPU is not used + const debugInfo = gl.getExtension('WEBGL_debug_renderer_info') + console.log('VENDOR WEBGL:', gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL)) + statData.Texture_Size = gl.getParameter(gl.MAX_TEXTURE_SIZE) // --returns the maximum dimension the GPU can address + } else { + statData.Texture_Size = null + } + return statData +} diff --git a/brainchop-webworker.js b/brainchop-webworker.js new file mode 100644 index 0000000..9737908 --- /dev/null +++ b/brainchop-webworker.js @@ -0,0 +1,1360 @@ +import * as tf from '@tensorflow/tfjs' +import { inferenceModelsList } from './brainchop-parameters.js' +import { + addZeroPaddingTo3dTensor, + applyMriThreshold, + binarizeVolumeDataTensor, + convByOutputChannelAndInputSlicing, + draw3dObjBoundingVolume, + firstLastNonZero3D, + generateBrainMask, + generateOutputSlicesV2, + getAllSlicesDataAsTF3D, + getModelNumLayers, + getModelNumParameters, + isModelChnlLast, + load_model, + minMaxNormalizeVolumeData, + quantileNormalizeVolumeData, + removeZeroPaddingFrom3dTensor, + resizeWithZeroPadding, + SequentialConvLayer +} from './tensor-utils.js' + +function callbackUI(message = '', progressFrac = -1, modalMessage = '', statData = []) { + let statStr = [] + if (Object.keys(statData).length > 0) { + function arrayToStr() { + const list = {} + for (const key in statData) { + list[key] = statData[key] + } + return JSON.stringify(list) + } + statStr = arrayToStr(statData) + } + self.postMessage({ + cmd: 'ui', + message, + progressFrac, + modalMessage, + statData: statStr + }) +} + +function callbackImg(img, opts, modelEntry) { + self.postMessage({ cmd: 'img', img, opts, modelEntry }) +} + +async function inferenceFullVolumeSeqCovLayerPhase2( + opts, + modelEntry, + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + pipeline1_out, + statData, + niftiImage +) { + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + + console.log(' ---- Start FullVolume Inference with Sequential Conv Layer for phase-II ---- ') + const quantileNorm = modelEntry.enableQuantileNorm + if (quantileNorm) { + // Quantile normalize function needs specific models to be used + console.log('preModel Quantile normalization enabled') + slices_3d = await quantileNormalizeVolumeData(slices_3d) + } else { + // Min Max Nomalize MRI data to be from 0 to 1 + console.log('preModel Min Max normalization enabled') + slices_3d = await minMaxNormalizeVolumeData(slices_3d) + } + + let mask_3d + + if (pipeline1_out == null) { + // preModel is null + + // Check if thresholding the MRI to remove noisy voxels for better cropping is needed. 
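+  // -- (autoThreshold in (0, 1] removes voxels below that fraction of the normalized intensity range
+  // -- before cropping; a value of 0 falls back to a simple greater-than-zero binarization.)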
+ const autoThresholdValue = modelEntry.autoThreshold + + if (autoThresholdValue > 0 && autoThresholdValue <= 1) { + // Filtered MRI from noisy voxel below autoThresholdValue + mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue) + } else { + console.log('No valid crop threshold value') + // binarize original image + mask_3d = await slices_3d.greater([0]).asType('bool') + } + } else { + mask_3d = await pipeline1_out.greater([0]).asType('bool') + // -- pipeline1_out.dispose() + } + + console.log(' mask_3d shape : ', mask_3d.shape) + const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d) + mask_3d.dispose() + // -- Reference voxel that cropped volume started slice with it + const refVoxel = [row_min, col_min, depth_min] + // -- Starting form refVoxel, size of bounding volume + const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + + // -- Extract 3d object (e.g. brain) + const cropped_slices_3d = await slices_3d.slice( + [row_min, col_min, depth_min], + [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + ) + slices_3d.dispose() + + // -- Padding size add to cropped brain + const pad = modelEntry.cropPadding + + // Create margin around the bounding volume + let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad]) + console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape) + + cropped_slices_3d.dispose() + + if (opts.drawBoundingVolume) { + let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', testVol.shape) + + testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr) + console.log(' outLabelVolume final shape after resizing : ', testVol.shape) + draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg) + testVol.dispose() + + return 0 + } + + statData.Brainchop_Ver = 'FullVolume' + const res = await model + try { + let startTime = performance.now() + const inferenceStartTime = performance.now() + // maxLabelPredicted in whole volume of the brain + let maxLabelPredicted = 0 + const transpose = modelEntry.enableTranspose + + if (transpose) { + cropped_slices_3d_w_pad = await cropped_slices_3d_w_pad.transpose() + console.log('Input transposed for pre-model') + } else { + console.log('Transpose not enabled for pre-model') + } + + let i = 1 + const layersLength = res.layers.length + console.log('res.layers.length ', layersLength) + + const isChannelLast = isModelChnlLast(res) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + let adjusted_input_shape + // -- Adjust model input shape + if (isChannelLast) { + res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + res.layers[0].batchInputShape[1], + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + numOfChan + ] + } else { + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + numOfChan, + res.layers[0].batchInputShape[2], + 
res.layers[0].batchInputShape[3], + res.layers[0].batchInputShape[4] + ] + } + + console.log(' Model batch input shape : ', res.layers[0].batchInputShape) + // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W] + + statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape) + statData.Output_Shape = JSON.stringify(res.output.shape) + statData.Channel_Last = await isChannelLast + statData.Model_Param = await getModelNumParameters(res) + statData.Model_Layers = await getModelNumLayers(res) + statData.Model = modelEntry.modelName + statData.Seq_Conv = modelEntry.enableSeqConv + // statData.Extra_Info = null + + // Determine the number of output channels in the last layer of the model + // e.g. 3, 50, 104 + const outputLayer = res.layers[res.layers.length - 1] + console.log('Output Layer : ', outputLayer) + + const expected_Num_labels = isChannelLast + ? outputLayer.outputShape[outputLayer.outputShape.length - 1] + : outputLayer.outputShape[1] + console.log('Num of output channels x: ', expected_Num_labels) + + const curTensor = [] + curTensor[0] = await cropped_slices_3d_w_pad.reshape(adjusted_input_shape) + while (true) { + try { + if (res.layers[i].activation.getClassName() !== 'linear') { + curTensor[i] = await res.layers[i].apply(curTensor[i - 1]) + } else { + curTensor[i] = await convByOutputChannelAndInputSlicing( + curTensor[i - 1], + res.layers[i].getWeights()[0], + res.layers[i].getWeights()[1], + res.layers[i].strides, + res.layers[i].padding, + res.layers[i].dilationRate, + 3 + ) // important for memory use + } + + tf.dispose(curTensor[i - 1]) + } catch (err) { + const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. ' + err.message + callbackUI(errTxt, -1, errTxt) + + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err.message + statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply' + + callbackUI('', -1, '', statData) + + return 0 + } + + console.log('layer output Tensor shape : ', curTensor[i].shape) + console.log('layer count params ', res.layers[i].countParams()) + + res.layers[i].dispose() + curTensor[i - 1].dispose() + + callbackUI('Layer ' + i.toString(), (i + 1) / layersLength) + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + if (i === layersLength - 2) { + // Stop before the last layer or classification layer. + + // // Create an instance of SequentialConvLayer + // The second parameter is important for memory, + // the larger it is, the more memory it uses + // it was 8, but I set it to 3, got a different error + // let seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast) + const seqConvLayer = await new SequentialConvLayer(res, 10, isChannelLast, callbackUI) + + // Apply the last output tensor to the seq. instance + let outputTensor = null + const profileInfo = await tf.profile(async () => { + // Your tensor operations here + outputTensor = await seqConvLayer.apply(curTensor[i]) + }) + console.log('profileInfo : ', profileInfo) + + // -- document.getElementById("progressBarChild").style.width = 0 + "%"; + + // Dispose the previous layer input tensor + tf.dispose(curTensor[i]) + // delete the used class + // ? 
delete seqConvLayer + + // You can now use 'outputTensor' as needed + console.log(' Output tensor', outputTensor) + console.log(' Output tensor shape : ', outputTensor.shape) + // Array(3) [ 256, 256, 256 ] + + if (outputTensor.shape.length !== 3) { + const msg = 'Output tensor shape should be 3 dims but it is ' + outputTensor.shape.length + callbackUI(msg, -1, msg) + } + + const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4) + + console.log(' find array max ') + const curBatchMaxLabel = await outputTensor.max().dataSync()[0] + if (maxLabelPredicted < curBatchMaxLabel) { + maxLabelPredicted = curBatchMaxLabel + } + + const numSegClasses = maxLabelPredicted + 1 + console.log('Predicted num of segmentation classes', numSegClasses) + statData.Actual_Labels = numSegClasses + statData.Expect_Labels = expected_Num_labels + statData.NumLabels_Match = numSegClasses === expected_Num_labels + if (numSegClasses !== expected_Num_labels) { + const msg = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses + callbackUI(msg, -1, msg) + } + + // -- Transpose back to original unpadded size + let outLabelVolume = outputTensor.reshape([ + cropped_slices_3d_w_pad.shape[0], + cropped_slices_3d_w_pad.shape[1], + cropped_slices_3d_w_pad.shape[2] + ]) + tf.dispose(outputTensor) + + // Transpose MRI data to be match pytorch/keras input output + if (transpose) { + console.log('outLabelVolume transposed') + outLabelVolume = outLabelVolume.transpose() + } + + outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape) + outLabelVolume = await resizeWithZeroPadding( + outLabelVolume, + num_of_slices, + slice_height, + slice_width, + refVoxel, + boundVolSizeArr + ) + console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape) + + // let filterOutWithPreMask = inferenceModelsList[$$("selectModel").getValue() - 1]["filterOutWithPreMask"] + const filterOutWithPreMask = modelEntry.filterOutWithPreMask + // To clean the skull area wrongly segmented inphase-2. 
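+          // -- The phase-1 mask is applied only when the model entry sets filterOutWithPreMask and the pipeline is mask based: the mask is binarized and multiplied into the label volume so labels outside the brain are zeroed.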
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) { + const bin = await binarizeVolumeDataTensor(pipeline1_out) + outLabelVolume = await outLabelVolume.mul(bin) + } + + startTime = performance.now() + // Generate output volume or slices + console.log('Generating correct output') + let outimg + try { + const img = await new Uint32Array(outLabelVolume.dataSync()) + const Vshape = outLabelVolume.shape + const Vtype = outLabelVolume.dtype + outimg = await generateOutputSlicesV2( + img, + Vshape, + Vtype, + num_of_slices, + numSegClasses, + slice_height, + slice_width, + modelEntry, + opts, + niftiImage + ) + console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors) + + tf.dispose(outLabelVolume) + tf.engine().endScope() + tf.engine().disposeVariables() + } catch (error) { + // -- Timing data to collect + tf.engine().endScope() + tf.engine().disposeVariables() + console.log('Error while generating output: ', error) + const msg = 'Failed while generating output due to limited browser memory available' + callbackUI(msg, -1, msg) + + statData.Inference_t = Inference_t + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = error.message + statData.Extra_Err_Info = 'Failed while generating output' + + callbackUI('', -1, '', statData) + + return 0 + } + const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4) + + console.log( + 'Processing the whole brain volume in tfjs for multi-class output mask took : ', + ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds' + ) + + // -- Timing data to collect + statData.Inference_t = Inference_t + statData.Postprocess_t = Postprocess_t + statData.Status = 'OK' + + callbackUI('', -1, '', statData) + callbackUI('Segmentation finished', 0) + callbackImg(outimg, opts, modelEntry) + return 0 + } else { + i++ + } + } + } catch (err) { + callbackUI(err.message, -1, err.message) + console.log( + 'If webgl context is lost, try to restore webgl context by visit the link ' + + 'here' + ) + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + } +} + +async function inferenceFullVolumePhase2( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + pipeline1_out, + modelEntry, + statData, + opts, + niftiImage +) { + let outimg = [] + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + console.log(' ---- Start FullVolume inference phase-II ---- ') + const quantileNorm = modelEntry.enableQuantileNorm + if (quantileNorm) { + // Quantile normalize function needs specific models to be used + console.log('preModel Quantile normalization enabled') + slices_3d = await quantileNormalizeVolumeData(slices_3d) + } else { + // Min Max Nomalize MRI data to be from 0 to 1 + console.log('preModel Min Max normalization enabled') + slices_3d = await minMaxNormalizeVolumeData(slices_3d) + } + let mask_3d + if (pipeline1_out == null) { + // preModel is null + + // Check if thresholding the MRI to remove noisy voxels for better cropping is needed. 
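+    // -- The mask built here is used only to find the brain bounding box (firstLastNonZero3D); the cropped intensities themselves are left unchanged.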
+ const autoThresholdValue = modelEntry.autoThreshold + + if (autoThresholdValue > 0 && autoThresholdValue <= 1) { + // Filtered MRI from noisy voxel below autoThresholdValue + mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue) + } else { + console.log('No valid crop threshold value') + // binarize original image + mask_3d = await slices_3d.greater([0]).asType('bool') + } + } else { + mask_3d = await pipeline1_out.greater([0]).asType('bool') + // -- pipeline1_out.dispose() + } + console.log(' mask_3d shape : ', mask_3d.shape) + const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d) + mask_3d.dispose() + // -- Reference voxel that cropped volume started slice with it + const refVoxel = [row_min, col_min, depth_min] + console.log('refVoxel :', refVoxel) + + // -- Starting form refVoxel, size of bounding volume + const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + + console.log('boundVolSizeArr :', boundVolSizeArr) + // -- Extract 3d object (e.g. brain) + const cropped_slices_3d = slices_3d.slice( + [row_min, col_min, depth_min], + [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] + ) + + slices_3d.dispose() + + // -- Padding size add to cropped brain + const pad = modelEntry.cropPadding + + // Create margin around the bounding volume + let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad], [pad, pad], [pad, pad]) + console.log(' cropped slices_3d with padding shape: ', cropped_slices_3d_w_pad.shape) + + cropped_slices_3d.dispose() + + // -- Test dim after padding .. + // for (let i = 0; i < cropped_slices_3d_w_pad.rank; i++) { + // if(cropped_slices_3d_w_pad.shape[i] > 256) { + // console.log(" cropped_slices_3d_w_pad > 256 ") + // } + + // } + + if (opts.drawBoundingVolume) { + let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', testVol.shape) + + testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr) + console.log(' outLabelVolume final shape after resizing : ', testVol.shape) + draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg) + testVol.dispose() + + return 0 + } + + statData.Brainchop_Ver = 'FullVolume' + let startTime = performance.now() + let adjusted_input_shape = [] + const res = await model + try { + startTime = performance.now() + const inferenceStartTime = performance.now() + // maxLabelPredicted in whole volume of the brain + let maxLabelPredicted = 0 + const transpose = modelEntry.enableTranspose + + if (transpose) { + cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose() + console.log('Input transposed for pre-model') + } else { + console.log('Transpose not enabled for pre-model') + } + + let i = 1 + const layersLength = res.layers.length + console.log('res.layers.length ', layersLength) + + const isChannelLast = isModelChnlLast(res) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + + // -- Adjust model input shape + if (isChannelLast) { + res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + res.layers[0].batchInputShape[1], + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + numOfChan 
+ ] + } else { + res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0] + res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1] + res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2] + + adjusted_input_shape = [ + batchSize, + numOfChan, + res.layers[0].batchInputShape[2], + res.layers[0].batchInputShape[3], + res.layers[0].batchInputShape[4] + ] + } + + console.log(' Model batch input shape : ', res.layers[0].batchInputShape) + // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W] + + statData.Input_Shape = JSON.stringify(res.layers[0].batchInputShape) + statData.Output_Shape = JSON.stringify(res.output.shape) + statData.Channel_Last = await isChannelLast + statData.Model_Param = await getModelNumParameters(res) + statData.Model_Layers = await getModelNumLayers(res) + statData.Model = modelEntry.modelName + // statData.Extra_Info = null + + const curTensor = [] + curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape) + // console.log("curTensor[0] :", curTensor[0].dataSync()) + + while (true) { + try { + // -- curTensor[i] = res.layers[i].apply( curTensor[i-1]) + curTensor[i] = res.layers[i].apply(curTensor[i - 1]) + } catch (err) { + callbackUI(err.message, -1, err.message) + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err.message + statData.Extra_Err_Info = 'Failed while model layer ' + i + ' apply' + + callbackUI('', -1, '', statData) + + return 0 + } + callbackUI('Layer ' + i.toString(), (i + 1) / layersLength) + console.log('layer output Tensor shape : ', curTensor[i].shape) + console.log('layer count params ', res.layers[i].countParams()) + res.layers[i].dispose() + curTensor[i - 1].dispose() + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + + if (i === layersLength - 1) { + // prediction = res.layers[res.layers.length-1].apply(curTensor[i]) + // curTensor[i].print() + // outputDataBeforArgmx = Array.from(curTensor[i].dataSync()) + + const axis = isChannelLast ? -1 : 1 + console.log(' find argmax ') + console.log('last Tensor shape : ', curTensor[i].shape) + // -- curTensor[i].shape e.g. [ 1, 256, 256, 256, 3 ] + const expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1] + let prediction_argmax + + // Try for argMax with model output tensor. + + try { + const argMaxTime = performance.now() + console.log(' Try tf.argMax for fullVolume ..') + prediction_argmax = tf.argMax(curTensor[i], axis) + console.log('tf.argMax for fullVolume takes : ', ((performance.now() - argMaxTime) / 1000).toFixed(4)) + } catch (err1) { + // if channel last + if (axis === -1) { + try { + const argMaxLargeTime = performance.now() + console.log(' tf.argMax failed .. try argMaxLarge ..') + callbackUI('', -1, 'tensor2LightBuffer() is not dead code?') + callbackUI('', -1, 'argMaxLarge() is not dead code?') + console.log( + 'argMaxLarge for fullVolume takes : ', + ((performance.now() - argMaxLargeTime) / 1000).toFixed(4) + ) + } catch (err2) { + const errTxt = "argMax buffer couldn't be created due to limited memory resources." 
+ callbackUI(errTxt, -1, errTxt) + + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err2.message + statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge failed' + + callbackUI('', -1, '', statData) + return 0 + } + } else { + // if channel first .. + const errTxt = "argMax buffer couldn't be created due to limited memory resources." + callbackUI(errTxt, -1, errTxt) + + prediction_argmax.dispose() + + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err1.message + statData.Extra_Err_Info = 'prediction_argmax from argMaxLarge not support yet channel first' + + callbackUI('', -1, '', statData) + + return 0 + } + } + + console.log(' prediction_argmax shape : ', prediction_argmax.shape) + // -- prediction_argmax.shape : [ 1, 256, 256, 256] + + const Inference_t = ((performance.now() - startTime) / 1000).toFixed(4) + + // outputDataBeforArgmx = Array.from(prediction_argmax.dataSync()) + tf.dispose(curTensor[i]) + console.log(' find array max ') + const curBatchMaxLabel = await prediction_argmax.max().dataSync()[0] + + if (maxLabelPredicted < curBatchMaxLabel) { + maxLabelPredicted = curBatchMaxLabel + } + + const numSegClasses = maxLabelPredicted + 1 + console.log('numSegClasses', numSegClasses) + statData.Actual_Labels = numSegClasses + statData.Expect_Labels = expected_Num_labels + statData.NumLabels_Match = numSegClasses === expected_Num_labels + + if (numSegClasses !== expected_Num_labels) { + // errTxt = "expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses + ". For possible solutions please refer to FAQ .", "alert-error" + const errTxt = 'expected ' + expected_Num_labels + ' labels, but the predicted are ' + numSegClasses + callbackUI(errTxt, -1, errTxt) + } + + // -- Transpose back to original unpadded size + let outLabelVolume = prediction_argmax.reshape([ + cropped_slices_3d_w_pad.shape[0], + cropped_slices_3d_w_pad.shape[1], + cropped_slices_3d_w_pad.shape[2] + ]) + tf.dispose(prediction_argmax) + + // Transpose MRI data to be match pytorch/keras input output + if (transpose) { + console.log('outLabelVolume transposed') + outLabelVolume = outLabelVolume.transpose() + } + outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad) + console.log(' outLabelVolume without padding shape : ', outLabelVolume.shape) + outLabelVolume = await resizeWithZeroPadding( + outLabelVolume, + num_of_slices, + slice_height, + slice_width, + refVoxel, + boundVolSizeArr + ) + console.log(' outLabelVolume final shape after resizing : ', outLabelVolume.shape) + + const filterOutWithPreMask = modelEntry.filterOutWithPreMask + // To clean the skull area wrongly segmented in phase-2. 
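+          // -- Note that opts.isBrainCropMaskBased must also be true; otherwise the phase-1 output is used only for cropping and the predicted labels are kept as-is.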
+ if (pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) { + const bin = binarizeVolumeDataTensor(pipeline1_out) + outLabelVolume = outLabelVolume.mul(bin) + } + + startTime = performance.now() + // Generate output volume or slices + console.log('Generating correct output') + + try { + const img = new Uint32Array(outLabelVolume.dataSync()) + const Vshape = outLabelVolume.shape + const Vtype = outLabelVolume.dtype + tf.dispose(outLabelVolume) + tf.engine().endScope() + tf.engine().disposeVariables() + outimg = await generateOutputSlicesV2( + img, + Vshape, + Vtype, + num_of_slices, + numSegClasses, + slice_height, + slice_width, + modelEntry, + opts, + niftiImage + ) + console.log(' Phase-2 num of tensors after generateOutputSlicesV2: ', tf.memory().numTensors) + } catch (error) { + // -- Timing data to collect + tf.engine().endScope() + tf.engine().disposeVariables() + + const errTxt = 'Failed while generating output due to limited browser memory available' + callbackUI(errTxt, -1, errTxt) + statData.Inference_t = Inference_t + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = error.message + statData.Extra_Err_Info = 'Failed while generating output' + + callbackUI('', -1, '', statData) + + return 0 + } + + const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4) + + tf.engine().disposeVariables() + + console.log( + 'Processing the whole brain volume in tfjs for multi-class output mask took : ', + ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds' + ) + + // -- Timing data to collect + statData.Inference_t = Inference_t + statData.Postprocess_t = Postprocess_t + statData.Status = 'OK' + callbackUI('Segmentation finished', 0) + callbackUI('', -1, '', statData) + callbackImg(outimg, opts, modelEntry) + + return 0 + } + i++ + } + } catch (err) { + callbackUI(err.message, -1, err.message) + console.log( + 'If webgl context is lost, try to restore webgl context by visit the link ' + + 'here' + ) + } +} + +async function inferenceFullVolumePhase1( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + isModelFullVol, + modelEntry, + statData, + opts, + niftiHeader, + niftiImage +) { + statData.No_SubVolumes = 1 + // load pre-model for inference first, can be null if no pre-model such as GWM models + if (modelEntry.preModelId) { + const preModel = await load_model(opts.rootURL + inferenceModelsList[modelEntry.preModelId - 1].path) + const transpose = inferenceModelsList[modelEntry.preModelId - 1].enableTranspose + const quantileNorm = inferenceModelsList[modelEntry.preModelId - 1].enableQuantileNorm + let preModel_slices_3d = null + + // -- If pre-model is not null then slices_3d mask will be generated.. + // -- The mask is needed to remove the skull and set noise in background to 0, and get the brain bounding volume properly + const slices_3d_mask = null + + if (quantileNorm) { + // Quantile normalize function needs specific models to be used + console.log('preModel Quantile normalization enabled') + preModel_slices_3d = await quantileNormalizeVolumeData(slices_3d) + } else { + // Min Max Nomalize MRI data to be from 0 to 1 + console.log('preModel Min Max normalization enabled') + preModel_slices_3d = await minMaxNormalizeVolumeData(slices_3d) + } + + // -- Transpose MRI data to be match pytorch/keras input output + // -- Check if pre-model needs transpose.. 
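+    // -- enableTranspose is read from the pre-model entry above; when set, the volume is transposed before inference and the label volume is transposed back after argMax.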
+ if (transpose) { + preModel_slices_3d = preModel_slices_3d.transpose() + console.log('Input transposed for pre-model') + } else { + console.log('Transpose not enabled for pre-model') + } + + statData.Brainchop_Ver = 'PreModel_FV' // e.g. "PreModel_FV" + + // preModel.then(function (res) { + const res = await preModel + + try { + const inferenceStartTime = performance.now() + const preModelObject = res + + // read input shape from model.json object + const preModelBatchInputShape = preModelObject.layers[0].batchInputShape + console.log(' Pre-Model batch input shape : ', preModelBatchInputShape) + + // -- Verify input shape + if (preModelBatchInputShape.length !== 5) { + const errTxt = 'The pre-model input shape must be 5D ' + callbackUI(errTxt, -1, errTxt) + return 0 + } + + const isPreModelChannelLast = await isModelChnlLast(preModelObject) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + let batch_D, batch_H, batch_W + let preModel_input_shape + if (isPreModelChannelLast) { + console.log('Pre-Model Channel Last') + if (isNaN(preModelBatchInputShape[4]) || preModelBatchInputShape[4] !== 1) { + const errTxt = 'The number of channels for pre-model input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + + batch_D = preModelBatchInputShape[1] + batch_H = preModelBatchInputShape[2] + batch_W = preModelBatchInputShape[3] + + preModel_input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan] + } else { + console.log('Pre-Model Channel First') + if (isNaN(preModelBatchInputShape[1]) || preModelBatchInputShape[1] !== 1) { + const errTxt = 'The number of channels for pre-model input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + + batch_D = preModelBatchInputShape[2] + batch_H = preModelBatchInputShape[3] + batch_W = preModelBatchInputShape[4] + + preModel_input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W] + } + + statData.Input_Shape = JSON.stringify(preModel_input_shape) + statData.Output_Shape = JSON.stringify(preModelObject.output.shape) + statData.Channel_Last = await isPreModelChannelLast + statData.Model_Param = await getModelNumParameters(preModelObject) + statData.Model_Layers = await getModelNumLayers(preModelObject) + + // maxLabelPredicted in whole volume of the brain + let maxLabelPredicted = 0 + + let i = 1 + const layersLength = res.layers.length + + const curTensor = [] + // -- reshape MRI to model input shape + curTensor[0] = preModel_slices_3d.reshape(preModel_input_shape) + + // Dispose the volume + tf.dispose(preModel_slices_3d) + while (true) { + try { + curTensor[i] = res.layers[i].apply(curTensor[i - 1]) + } catch (err) { + const errTxt = 'Your graphics card (e.g. Intel) may not be compatible with WebGL. 
' + err.message + callbackUI(errTxt, -1, errTxt) + + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err.message + statData.Extra_Err_Info = 'PreModel Failed while model layer ' + i + ' apply' + + callbackUI('', -1, '', statData) + + return 0 + } + + res.layers[i].dispose() + curTensor[i - 1].dispose() + + callbackUI('Layer ' + i.toString(), (i + 1) / layersLength) + if (tf.memory().unreliable) { + const unreliableReasons = 'unreliable reasons :' + tf.memory().reasons + callbackUI(unreliableReasons, NaN, unreliableReasons) + } + + if (i === layersLength - 1) { + // -- prediction = res.layers[res.layers.length-1].apply(curTensor[i]) + // -- curTensor[i].print() + // -- outputDataBeforArgmx = Array.from(curTensor[i].dataSync()) + + const axis = isPreModelChannelLast ? -1 : 1 + console.log(' find argmax ') + console.log('last Tensor shape : ', curTensor[i].shape) + // -- curTensor[i].shape : [ 1, 256, 256, 256, 3 ] + const expected_Num_labels = isPreModelChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1] + let prediction_argmax + + // Try for argMax with model output tensor. + + try { + console.log(' Try tf.argMax for fullVolume ..') + prediction_argmax = await tf.argMax(curTensor[i], axis) + } catch (err1) { + // if channel last + if (axis === -1) { + try { + const argMaxLargeTime = performance.now() + console.log(' tf.argMax failed .. try argMaxLarge ..') + callbackUI('', -1, 'tensor2LightBuffer() is not dead code?') + callbackUI('', -1, 'argMaxLarge() is not dead code?') + console.log( + 'argMaxLarge for fullVolume takes : ', + ((performance.now() - argMaxLargeTime) / 1000).toFixed(4) + ) + } catch (err2) { + const errTxt = "argMax buffer couldn't be created due to limited memory resources." + callbackUI(errTxt, -1, errTxt) + + prediction_argmax.dispose() + + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err2.message + statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge failed' + + callbackUI('', -1, '', statData) + + return 0 + } + } else { + // if channel first .. + const errTxt = "argMax buffer couldn't be created due to limited memory resources." 
+ callbackUI(errTxt, -1, errTxt) + + prediction_argmax.dispose() + + tf.engine().endScope() + tf.engine().disposeVariables() + + statData.Inference_t = Infinity + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = err1.message + statData.Extra_Err_Info = 'preModel prediction_argmax from argMaxLarge not support yet channel first' + + callbackUI('', -1, '', statData) + + return 0 + } + } + + console.log(' Pre-model prediction_argmax shape : ', prediction_argmax.shape) + // -- prediction_argmax.shape : [ 1, 256, 256, 256] + + const Inference_t = ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + + tf.dispose(curTensor[i]) + + console.log(' Pre-model find array max ') + const curBatchMaxLabel = await prediction_argmax.max().dataSync()[0] + + if (maxLabelPredicted < curBatchMaxLabel) { + maxLabelPredicted = curBatchMaxLabel + } + + const numSegClasses = maxLabelPredicted + 1 + console.log('Pre-model numSegClasses', numSegClasses) + + statData.Actual_Labels = numSegClasses + statData.Expect_Labels = expected_Num_labels + statData.NumLabels_Match = numSegClasses === expected_Num_labels + + // -- Transpose back to original unpadded size + let outLabelVolume = await prediction_argmax.reshape([num_of_slices, slice_height, slice_width]) + tf.dispose(prediction_argmax) + // Transpose MRI data to be match pytorch/keras input output + if (transpose) { + console.log('Pre-model outLabelVolume transposed') + outLabelVolume = outLabelVolume.transpose() + } + const startTime = performance.now() + // Generate output volume or slices + console.log('Generating pre-model output') + let slices_3d_mask + try { + const unstackOutVolumeTensor = await tf.unstack(outLabelVolume) + slices_3d_mask = await generateBrainMask( + unstackOutVolumeTensor, + num_of_slices, + slice_height, + slice_width, + modelEntry, + opts, + niftiHeader, + niftiImage, + false + ) + await tf.dispose(outLabelVolume) + console.log(' Phase-1 num of tensors after generateBrainMask: ', tf.memory().numTensors) + } catch (error) { + // -- Timing data to collect + tf.engine().endScope() + tf.engine().disposeVariables() + + const errTxt = 'Failed while generating pre-model output due to limited browser memory available' + callbackUI(errTxt, -1, errTxt) + + statData.Inference_t = Inference_t + statData.Postprocess_t = Infinity + statData.Status = 'Fail' + statData.Error_Type = error.message + statData.Extra_Err_Info = 'Pre-model failed while generating output' + + callbackUI('', -1, '', statData) + + return 0 + } + const Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4) + console.log( + 'Pre-model processing the whole brain volume in tfjs tooks for multi-class output mask : ', + ((performance.now() - inferenceStartTime) / 1000).toFixed(4) + ' Seconds' + ) + + // -- Timing data to collect + statData.Inference_t = Inference_t + statData.Postprocess_t = Postprocess_t + statData.Status = 'OK' + + callbackUI('', -1, '', statData) + + if (slices_3d_mask == null) { + const msg = 'slice_3d_mask failed ...' + callbackUI(msg, -1, msg) + return 0 + } else { + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + console.log('--- pre-model done ---') + // --mask_3d = slices_3d_mask.greater([0]).asType('bool') + // --slices_3d_mask.dispose() + + if (isModelFullVol) { + if (modelEntry.enableSeqConv) { + // Mask cropping & seq conv + // Non-Atlas model (e.g. GWM) needs sequential convolution layer. 
+ // Sequential convolution layer to be used after cropping - slow but reliable on most machines + console.log('------ Mask Cropping & Seq Convoluton ------') + await inferenceFullVolumeSeqCovLayerPhase2( + opts, + modelEntry, + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + slices_3d_mask, + statData, + niftiImage + ) + return 0 + // inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask) + } else { + // Mask cropping BUT no seq conv + console.log('------ Mask Cropping - NO Seq Convoluton ------') + await inferenceFullVolumePhase2( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + slices_3d_mask, + modelEntry, + statData, + opts, + niftiImage + ) + // inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask) + } + } else { + // -- In version 3.0.0 this function not used + callbackUI('', -1, 'inferenceSubVolumes() is not dead code?') + } + } + } + i++ + } + } catch (err) { + callbackUI(err.message, -1, err.message) + console.log( + 'If webgl context is lost, try to restore webgl context by visit the link ' + + 'here' + ) + + // document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green" + // document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green" + } + // }) + + // -- if(...) end + } else { + // No preModel + + // --Phase-2, After remove the skull try to allocate brain volume and make inferece + console.log('--- No pre-model is selected ---') + console.log('------ Run voxel cropping ------') + // -- mask_3d = slices_3d.greater([0]).asType('bool') + + if (isModelFullVol) { + if (modelEntry.enableSeqConv) { + // Voxel cropping & seq conv + // Non-Atlas model (e.g. GWM) needs sequential convolution layer. + // Sequential convolution layer to be used after cropping - slow but reliable on most machines + console.log('------ Seq Convoluton ------') + await inferenceFullVolumeSeqCovLayerPhase2( + opts, + modelEntry, + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + null, + statData, + niftiImage + ) + } else { + // Voxel cropping BUT no seq conv + // todo: we do not use result const outimg = await + inferenceFullVolumePhase2( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + null, + modelEntry, + statData, + opts, + niftiImage + ) + } + } else { + // -- In version 3.0.0 this function not used + callbackUI('', -1, 'inferenceSubVolumes() is not dead code?') + } + } +} + +async function enableProductionMode(textureF16Flag = true, isNvidia = true) { + // -- tf.setBackend('cpu') + tf.setBackend('webgl') + // -- tf.removeBackend('cpu') + // -- Calling enableProdMode() method + await tf.enableProdMode() + // -- Setting debug mode of the environment + tf.env().set('DEBUG', false) + tf.env().set('WEBGL_FORCE_F16_TEXTURES', textureF16Flag) + // NVidia requires -1, Intel GPUs require 0. 
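+  // -- WEBGL_DELETE_TEXTURE_THRESHOLD appears to control when tfjs releases WebGL textures: -1 keeps them cached for reuse, 0 frees them as soon as they are disposed (assumption, not verified against the tfjs source).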
+ if (isNvidia) { + tf.env().set('WEBGL_DELETE_TEXTURE_THRESHOLD', -1) + } else { + tf.env().set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0) + } + // -- tf.env().set('WEBGL_PACK', false) + // -- Put ready after sets above + await tf.ready() + // -- Printing output + console.log('tf env() flags :', JSON.stringify(tf.env().flags, null, 2)) + console.log('tf env() features :', JSON.stringify(tf.env().features, null, 2)) + console.log('tf env total features: ', Object.keys(tf.env().features).length) + console.log('tf backend: ', tf.getBackend()) +} + +async function runInferenceWW(opts, modelEntry, niftiHeader, niftiImage) { + const statData = [] + statData.startTime = Date.now() // for common webworker/mainthread do not use performance.now() + callbackUI('Segmentation started', 0) + const batchSize = opts.batchSize + const numOfChan = opts.numOfChan + if (isNaN(batchSize) || batchSize !== 1) { + const errTxt = 'The batch Size for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + if (isNaN(numOfChan) || numOfChan !== 1) { + const errTxt = 'The number of channels for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + tf.engine().startScope() + console.log('Batch size: ', batchSize) + console.log('Num of Channels: ', numOfChan) + const model = await load_model(opts.rootURL + modelEntry.path) + await enableProductionMode(true, (model && modelEntry.isNvidia === true)) + statData.TF_Backend = tf.getBackend() + const modelObject = model + let batchInputShape = [] + // free global variable of 16777216 voxel + // allOutputSlices3DCC1DimArray = [] + // outputSceneRendered = false + // read input shape from model.json object + batchInputShape = modelObject.layers[0].batchInputShape + console.log(' Model batch input shape : ', batchInputShape) + // -- Verify input shape + if (batchInputShape.length !== 5) { + const errTxt = 'The model input shape must be 5D' + callbackUI(errTxt, -1, errTxt) + return 0 + } + let batch_D, batch_H, batch_W + const slice_width = niftiHeader.dims[1] + const slice_height = niftiHeader.dims[2] + const num_of_slices = niftiHeader.dims[3] + const isChannelLast = await isModelChnlLast(modelObject) + if (isChannelLast) { + console.log('Model Channel Last') + if (isNaN(batchInputShape[4]) || batchInputShape[4] !== 1) { + const errTxt = 'The number of channels for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + batch_D = batchInputShape[1] + batch_H = batchInputShape[2] + batch_W = batchInputShape[3] + } else { + console.log('Model Channel First') + if (isNaN(batchInputShape[1]) || batchInputShape[1] !== 1) { + const errTxt = 'The number of channels for input shape must be 1' + callbackUI(errTxt, -1, errTxt) + return 0 + } + batch_D = batchInputShape[2] + batch_H = batchInputShape[3] + batch_W = batchInputShape[4] + } + // const input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W] + // --Check whether the model will make inference at once as FullVolumeModel + let isModelFullVol + if (batch_D === 256 && batch_H === 256 && batch_W === 256) { + isModelFullVol = true + } else { + isModelFullVol = false + } + statData.isModelFullVol = isModelFullVol + // Model output number of segmentations + let slices_3d = await getAllSlicesDataAsTF3D(num_of_slices, niftiHeader, niftiImage) + const transpose = modelEntry.enableTranspose + const enableCrop = modelEntry.enableCrop + if (isModelFullVol) { + if (enableCrop) { + // FullVolume with Crop option before inference .. 
+ // pre-model to mask the volume, can also be null and the cropping will be on the MRI. + await inferenceFullVolumePhase1( + model, + slices_3d, + num_of_slices, + slice_height, + slice_width, + isModelFullVol, + modelEntry, + statData, + opts, + niftiHeader, + niftiImage + ) + } else { + // Transpose MRI data to be match pytorch/keras input output + console.log('Cropping Disabled') + + if (transpose) { + slices_3d = slices_3d.transpose() + console.log('Input transposed') + } else { + console.log('Transpose NOT Enabled') + } + + const enableSeqConv = modelEntry.enableSeqConv + + if (enableSeqConv) { + callbackUI('', -1, 'inferenceFullVolumeSeqCovLayer() is not dead code?') + } else { + callbackUI('', -1, 'inferenceFullVolume() is not dead code?') + } + } + } +} + +self.addEventListener( + 'message', + function (event) { + runInferenceWW(event.data.opts, event.data.modelEntry, event.data.niftiHeader, event.data.niftiImage) + }, + false +) diff --git a/bwlabels.js b/bwlabels.js new file mode 100644 index 0000000..d83dc7b --- /dev/null +++ b/bwlabels.js @@ -0,0 +1,278 @@ +export class BWLabeler { + // port of https://github.com/rordenlab/niimath/blob/master/src/bwlabel.c + // return voxel address given row A, column B, and slice C + idx(A, B, C, DIM) { + return C * DIM[0] * DIM[1] + B * DIM[0] + A + } // idx() + + // determine if voxels below candidate voxel have already been assigned a label + check_previous_slice(bw, il, r, c, sl, dim, conn, tt, nabo, tn) { + let nr_set = 0 + if (!sl) { + return 0 + } + const val = bw[this.idx(r, c, sl, dim)] + if (conn >= 6) { + const idx = this.idx(r, c, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (conn >= 18) { + if (r) { + const idx = this.idx(r - 1, c, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (c) { + const idx = this.idx(r, c - 1, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (r < dim[0] - 1) { + const idx = this.idx(r + 1, c, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (c < dim[1] - 1) { + const idx = this.idx(r, c + 1, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + } + if (conn === 26) { + if (r && c) { + const idx = this.idx(r - 1, c - 1, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (r < dim[0] - 1 && c) { + const idx = this.idx(r + 1, c - 1, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (r && c < dim[1] - 1) { + const idx = this.idx(r - 1, c + 1, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (r < dim[0] - 1 && c < dim[1] - 1) { + const idx = this.idx(r + 1, c + 1, sl - 1, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + } + if (nr_set) { + this.fill_tratab(tt, nabo, nr_set, tn) + return nabo[0] + } else { + return 0 + } + } // check_previous_slice() + + // provisionally label all voxels in volume + do_initial_labelling(bw, dim, conn) { + const naboPS = new Uint32Array(32) + const tn = new Uint32Array(32) + let label = 1 + const kGrowArrayBy = 8192 + let ttn = kGrowArrayBy + let tt = new Uint32Array(ttn).fill(0) + const il = new Uint32Array(dim[0] * dim[1] * dim[2]).fill(0) + const nabo = new Uint32Array(27) + for (let sl = 0; sl < dim[2]; sl++) { + for (let c = 0; c < dim[1]; c++) { + for (let r = 0; r < dim[0]; r++) { + let nr_set = 0 + const val = bw[this.idx(r, c, sl, dim)] + if (val === 0) { + continue + } + nabo[0] = this.check_previous_slice(bw, il, r, c, sl, dim, 
conn, tt, naboPS, tn) + if (nabo[0]) { + nr_set += 1 + } + if (conn >= 6) { + if (r) { + const idx = this.idx(r - 1, c, sl, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (c) { + const idx = this.idx(r, c - 1, sl, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + } + if (conn >= 18) { + if (c && r) { + const idx = this.idx(r - 1, c - 1, sl, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + if (c && r < dim[0] - 1) { + const idx = this.idx(r + 1, c - 1, sl, dim) + if (val === bw[idx]) { + nabo[nr_set++] = il[idx] + } + } + } + if (nr_set) { + il[this.idx(r, c, sl, dim)] = nabo[0] + this.fill_tratab(tt, nabo, nr_set, tn) + } else { + il[this.idx(r, c, sl, dim)] = label + if (label >= ttn) { + ttn += kGrowArrayBy + const ext = new Uint32Array(ttn) + ext.set(tt) + tt = ext + } + tt[label - 1] = label + label++ + } + } + } + } + for (let i = 0; i < label - 1; i++) { + let j = i + while (tt[j] !== j + 1) { + j = tt[j] - 1 + } + tt[i] = j + 1 + } + return [label - 1, tt, il] + } // do_initial_labelling() + + // translation table unifies a region that has been assigned multiple classes + fill_tratab(tt, nabo, nr_set, tn) { + // let cntr = 0 + //tn.fill(0) + const INT_MAX = 2147483647 + let ltn = INT_MAX + for (let i = 0; i < nr_set; i++) { + let j = nabo[i] + // cntr = 0 + while (tt[j - 1] !== j) { + j = tt[j - 1] + /* cntr++ + if (cntr > 100) { + console.log('\nOoh no!!') + break + } */ + } + tn[i] = j + ltn = Math.min(ltn, j) + } + for (let i = 0; i < nr_set; i++) { + tt[tn[i] - 1] = ltn + } + } // fill_tratab() + + // remove any residual gaps so label numbers are dense rather than sparse + translate_labels(il, dim, tt, ttn) { + const nvox = dim[0] * dim[1] * dim[2] + let ml = 0 + const l = new Uint32Array(nvox).fill(0) + for (let i = 0; i < ttn; i++) { + ml = Math.max(ml, tt[i]) + } + const fl = new Uint32Array(ml).fill(0) + let cl = 0 + for (let i = 0; i < nvox; i++) { + if (il[i]) { + if (!fl[tt[il[i] - 1] - 1]) { + cl += 1 + fl[tt[il[i] - 1] - 1] = cl + } + l[i] = fl[tt[il[i] - 1] - 1] + } + } + return [cl, l] + } // translate_labels() + + // retain only the largest cluster for each region + largest_original_cluster_labels(bw, cl, ls) { + const nvox = bw.length + const ls2bw = new Uint32Array(cl + 1).fill(0) + const sumls = new Uint32Array(cl + 1).fill(0) + for (let i = 0; i < nvox; i++) { + const bwVal = bw[i] + const lsVal = ls[i] + ls2bw[lsVal] = bwVal + sumls[lsVal]++ + } + let mxbw = 0 + for (let i = 0; i < cl + 1; i++) { + const bwVal = ls2bw[i] + mxbw = Math.max(mxbw, bwVal) + // see if this is largest cluster of this bw-value + for (let j = 0; j < cl + 1; j++) { + if (j === i) { + continue + } + if (bwVal !== ls2bw[j]) { + continue + } + if (sumls[i] < sumls[j]) { + ls2bw[i] = 0 + } else if (sumls[i] === sumls[j] && i < j) { + ls2bw[i] = 0 + } // ties: arbitrary winner + } + } + const vxs = new Uint32Array(nvox).fill(0) + for (let i = 0; i < nvox; i++) { + vxs[i] = ls2bw[ls[i]] + } + return [mxbw, vxs] + } + + // given a 3D image, return a clustered label map + // for an explanation and optimized C code see + // https://github.com/seung-lab/connected-components-3d + bwlabel(img, dim, conn = 26, binarize = false, onlyLargestClusterPerClass = false) { + const start = Date.now() + const nvox = dim[0] * dim[1] * dim[2] + const bw = new Uint32Array(nvox).fill(0) + if (![6, 18, 26].includes(conn)) { + console.log('bwlabel: conn must be 6, 18 or 26.') + return [0, bw] + } + if (dim[0] < 2 || dim[1] < 2 || dim[2] < 1) { + 
console.log('bwlabel: img must be 2 or 3-dimensional') + return [0, bw] + } + if (binarize) { + for (let i = 0; i < nvox; i++) { + if (img[i] !== 0.0) { + bw[i] = 1 + } + } + } else { + bw.set(img) + } + let [ttn, tt, il] = this.do_initial_labelling(bw, dim, conn) + if (tt === undefined) { + tt = new Uint32Array(0) + } + const [cl, ls] = this.translate_labels(il, dim, tt, ttn) + console.log(conn + ' neighbor clustering into ' + cl + ' regions in ' + (Date.now() - start) + 'ms') + if (onlyLargestClusterPerClass) { + const [nbw, bwMx] = this.largest_original_cluster_labels(bw, cl, ls) + return [nbw, bwMx] + } + return [cl, ls] + } // bwlabel() +} diff --git a/index.html b/index.html new file mode 100644 index 0000000..db73800 --- /dev/null +++ b/index.html @@ -0,0 +1,129 @@ + + + + + + + + Niivue brain chop + + + +
+ + + + \ No newline at end of file diff --git a/main.js b/main.js new file mode 100644 index 0000000..ee89bd1 --- /dev/null +++ b/main.js @@ -0,0 +1,343 @@ +import { Niivue, NVMeshUtilities } from "@niivue/niivue" +import { Niimath } from "@niivue/niimath" +// import {runInference } from './brainchop-mainthread.js' +import { inferenceModelsList, brainChopOpts } from "./brainchop-parameters.js" +import { isChrome, localSystemDetails } from "./brainchop-telemetry.js" +import MyWorker from "./brainchop-webworker.js?worker" + +// class NiiMathWrapper { +// constructor(workerScript) { +// this.worker = new Worker(workerScript) +// } +// static async load(workerScript = './niimathWorker.js') { +// return new NiiMathWrapper(workerScript) +// } +// niimath(niiBuffer, operationsText) { +// return new Promise((resolve, reject) => { +// const niiBlob = new Blob([niiBuffer], { type: 'application/octet-stream' }) +// const inName = 'input.nii' // or derive from context +// let outName = inName +// if (operationsText.includes("-mesh")) { +// outName = 'output.mz3' // or derive from context +// } +// const args = operationsText.trim().split(/\s+/) +// args.unshift(inName) +// args.push(outName) +// const file = new File([niiBlob], inName) +// this.worker.onmessage = (e) => { +// if (e.data.blob instanceof Blob) { +// const reader = new FileReader() +// reader.onload = () => { +// resolve(reader.result) // return ArrayBuffer +// } +// reader.onerror = () => { +// reject(new Error('Failed to read the Blob as an ArrayBuffer')) +// } +// reader.readAsArrayBuffer(e.data.blob) +// } else { +// reject(new Error('Expected Blob from worker')) +// } +// } +// this.worker.onerror = (e) => { +// reject(new Error(e.message)) +// } +// this.worker.postMessage({ blob: file, cmd: args, outName: outName }) +// }) +// } +// terminate() { +// this.worker.terminate() +// } +// } + +async function main() { + const niimath = new Niimath() + await niimath.init() + // const wrapper = await NiiMathWrapper.load() + /*smoothCheck.onchange = function () { + nv1.setInterpolation(!smoothCheck.checked) + }*/ + aboutBtn.onclick = function () { + const url = "https://github.com/neurolabusc/niivue-brainchop"; + window.open(url, '_blank'); + } + /*diagnosticsBtn.onclick = function () { + if (diagnosticsString.length < 1) { + window.alert('No diagnostic string generated: run a model to create diagnostics') + return + } + navigator.clipboard.writeText(diagnosticsString) + window.alert('Diagnostics copied to clipboard\n' + diagnosticsString) + }*/ + opacitySlider0.oninput = function () { + nv1.setOpacity(0, opacitySlider0.value / 255) + nv1.updateGLVolume() + } + opacitySlider1.oninput = function () { + nv1.setOpacity(1, opacitySlider1.value / 255) + } + async function ensureConformed() { + let nii = nv1.volumes[0] + let isConformed = ((nii.dims[1] === 256) && (nii.dims[2] === 256) && (nii.dims[3] === 256)) + if ((nii.permRAS[0] !== -1) || (nii.permRAS[1] !== 3) || (nii.permRAS[2] !== -2)) + isConformed = false + if (isConformed) + return + let nii2 = await nv1.conform(nii, false) + await nv1.removeVolume(nv1.volumes[0]) + await nv1.addVolume(nii2) + } + async function closeAllOverlays() { + while (nv1.volumes.length > 1) { + await nv1.removeVolume(nv1.volumes[1]) + } + } + modelSelect.onchange = async function () { + if (this.selectedIndex < 0) + modelSelect.selectedIndex = 11 + await closeAllOverlays() + await ensureConformed() + let model = inferenceModelsList[this.selectedIndex] + model.isNvidia = false + const rendererInfo = 
nv1.gl.getExtension('WEBGL_debug_renderer_info') + if (rendererInfo) { + model.isNvidia = nv1.gl.getParameter(rendererInfo.UNMASKED_RENDERER_WEBGL).includes('NVIDIA') + + } + + let opts = brainChopOpts + opts.rootURL = location.href + const isLocalhost = Boolean( + window.location.hostname === 'localhost' || + // [::1] is the IPv6 localhost address. + window.location.hostname === '[::1]' || + // 127.0.0.1/8 is considered localhost for IPv4. + window.location.hostname.match( + /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ + ) + ) + if (isLocalhost) + opts.rootURL = location.protocol + '//' + location.host + if (workerCheck.checked) { + if(typeof(chopWorker) !== "undefined") { + console.log('Unable to start new segmentation: previous call has not completed') + return + } + chopWorker = await new MyWorker({ type: "module" }) + let hdr = {datatypeCode: nv1.volumes[0].hdr.datatypeCode, dims: nv1.volumes[0].hdr.dims} + let msg = {opts:opts, modelEntry: model, niftiHeader: hdr, niftiImage: nv1.volumes[0].img} + chopWorker.postMessage(msg) + chopWorker.onmessage = function(event) { + let cmd = event.data.cmd + if (cmd === 'ui') { + if (event.data.modalMessage !== "") { + chopWorker.terminate() + chopWorker = undefined + } + callbackUI(event.data.message, event.data.progressFrac, event.data.modalMessage, event.data.statData) + } + if (cmd === 'img') { + chopWorker.terminate() + chopWorker = undefined + callbackImg(event.data.img, event.data.opts, event.data.modelEntry) + } + } + } else { + console.log('Only provided with webworker code, see main brainchop github repository for main thread code') + // runInference(opts, model, nv1.volumes[0].hdr, nv1.volumes[0].img, callbackImg, callbackUI) + } + } + saveBtn.onclick = function () { + nv1.volumes[1].saveToDisk("Custom.nii") + } + workerCheck.onchange = function () { + modelSelect.onchange() + } + clipCheck.onchange = function () { + if (clipCheck.checked) { + nv1.setClipPlane([0, 0, 90]) + } else { + nv1.setClipPlane([2, 0, 90]) + } + } + function doLoadImage() { + opacitySlider0.oninput() + } + async function fetchJSON(fnm) { + const response = await fetch(fnm) + const js = await response.json() + return js + } + async function callbackImg(img, opts, modelEntry) { + closeAllOverlays() + let overlayVolume = await nv1.volumes[0].clone() + overlayVolume.zeroImage() + overlayVolume.hdr.scl_inter = 0 + overlayVolume.hdr.scl_slope = 1 + overlayVolume.img = new Uint8Array(img) + if (modelEntry.colormapPath) { + let cmap = await fetchJSON(modelEntry.colormapPath) + overlayVolume.setColormapLabel(cmap) + // n.b. 
most models create indexed labels, but those without colormap mask scalar input + overlayVolume.hdr.intent_code = 1002 // NIFTI_INTENT_LABEL + } else { + let colormap = opts.atlasSelectedColorTable.toLowerCase() + const cmaps = nv1.colormaps() + if (!cmaps.includes(colormap)) { + colormap = 'actc' + } + overlayVolume.colormap = colormap + } + overlayVolume.opacity = opacitySlider1.value / 255 + await nv1.addVolume(overlayVolume) + } + async function reportTelemetry(statData) { + if (typeof statData === 'string' || statData instanceof String) { + function strToArray(str) { + const list = JSON.parse(str) + const array = [] + for (const key in list) { + array[key] = list[key] + } + return array + } + statData = strToArray(statData) + } + statData = await localSystemDetails(statData, nv1.gl) + diagnosticsString = ':: Diagnostics can help resolve issues https://github.com/neuroneural/brainchop/issues ::\n' + for (var key in statData){ + diagnosticsString += key + ': ' + statData[key]+'\n' + } + } + function callbackUI(message = "", progressFrac = -1, modalMessage = "", statData = []) { + if (message !== "") { + console.log(message) + document.getElementById("location").innerHTML = message + } + if (isNaN(progressFrac)) { //memory issue + memstatus.style.color = "red" + memstatus.innerHTML = "Memory Issue" + } else if (progressFrac >= 0) { + modelProgress.value = progressFrac * modelProgress.max + } + if (modalMessage !== "") { + window.alert(modalMessage) + } + if (Object.keys(statData).length > 0) { + reportTelemetry(statData) + } + } + function handleLocationChange(data) { + document.getElementById("location").innerHTML = "  " + data.string + } + let defaults = { + backColor: [0.4, 0.4, 0.4, 1], + show3Dcrosshair: true, + onLocationChange: handleLocationChange, + } + createMeshBtn.onclick = function () { + if (nv1.meshes.length > 0) + nv1.removeMesh(nv1.meshes[0]) + if (nv1.volumes.length < 1) { + window.alert("Image not loaded. Drag and drop an image.") + } else { + remeshDialog.show() + } + } + applyBtn.onclick = async function () { + const niiBuffer = await nv1.saveImage({volumeByIndex: nv1.volumes.length - 1}).buffer + const niiBlob = new Blob([niiBuffer], { type: 'application/octet-stream' }) + const niiFile = new File([niiBlob], 'input.nii') + // get an ImageProcessor instance from niimath + // so we can build up the operations we want to perform + // based on the UI controls + let image = niimath.image(niiFile) + loadingCircle.classList.remove('hidden') + // initialize the operations object for the niimath mesh function + let ops = { + i: 0.5, + } + //const largestCheckValue = largestCheck.checked + if (largestCheck.checked) { + ops.l = 1 + } + let reduce = Math.min(Math.max(Number(shrinkPct.value) / 100, 0.01), 1) + ops.r = reduce + if (bubbleCheck.checked) { + ops.b = 1 + } + + let hollowInt = Number(hollowSelect.value ) + if (hollowInt < 0){ + // append the hollow operation to the image processor + // but dont run it yet. + image = image.hollow(0.5, hollowInt) + } + + let closeFloat = Number(closeMM.value) + if ((isFinite(closeFloat)) && (closeFloat > 0)){ + // append the close operation to the image processor + // but dont run it yet. 
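+      // Closing is a dilation followed by an erosion: it seals crevices and small cavities narrower than the requested size without growing the rest of the surface; the 0.5 isolevel and 2 * closeFloat below are simply forwarded to the niimath wrapper.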
+ image = image.close(0.5, closeFloat, 2 * closeFloat) + } + // add the mesh operations + image = image.mesh(ops) + console.log('niimath mesh operation', image.commands) + // finally, run the full set of operations + const outFile = await image.run('output.mz3') + const arrayBuffer = await outFile.arrayBuffer() + loadingCircle.classList.add('hidden') + if (nv1.meshes.length > 0) + nv1.removeMesh(nv1.meshes[0]) + await nv1.loadFromArrayBuffer(arrayBuffer, 'output.mz3') + nv1.reverseFaces(0) + } + saveMeshBtn.onclick = function () { + if (nv1.meshes.length < 1) { + window.alert("No mesh open for saving. Use 'Create Mesh'.") + } else { + saveDialog.show() + } + } + applySaveBtn.onclick = function () { + if (nv1.meshes.length < 1) { + return + } + let format = 'obj' + if (formatSelect.selectedIndex === 0) { + format = 'mz3' + } + if (formatSelect.selectedIndex === 2) { + format = 'stl' + } + const scale = 1 / Number(scaleSelect.value) + const pts = nv1.meshes[0].pts.slice() + for (let i = 0; i < pts.length; i++) + pts[i] *= scale; + NVMeshUtilities.saveMesh(pts, nv1.meshes[0].tris, `mesh.${format}`, true) + } + + var diagnosticsString = '' + var chopWorker + let nv1 = new Niivue(defaults) + nv1.attachToCanvas(gl1) + nv1.opts.dragMode = nv1.dragModes.pan + nv1.opts.multiplanarForceRender = true + nv1.opts.yoke3Dto2DZoom = true + nv1.opts.crosshairGap = 11 + await nv1.loadVolumes([{ url: "./t1_crop.nii.gz" }]) + for (let i = 0; i < inferenceModelsList.length; i++) { + var option = document.createElement("option") + option.text = inferenceModelsList[i].modelName + option.value = inferenceModelsList[i].id.toString() + modelSelect.appendChild(option) + } + nv1.onImageLoaded = doLoadImage + modelSelect.selectedIndex = -1 + workerCheck.checked = await isChrome() //TODO: Safari does not yet support WebGL TFJS webworkers, test FireFox + // uncomment next two lines to automatically run segmentation when web page is loaded + // modelSelect.selectedIndex = 11 + // modelSelect.onchange() + +} + +main() diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..8b3f78a --- /dev/null +++ b/package-lock.json @@ -0,0 +1,1498 @@ +{ + "name": "niivue-brainchop", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "niivue-brainchop", + "version": "0.1.0", + "dependencies": { + "@niivue/niimath": "^0.1.1", + "@niivue/niivue": "^0.44.2", + "@tensorflow/tfjs": "^4.19.0", + "gl-matrix": "^3.4.3" + }, + "devDependencies": { + "vite": "^5.4.2" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": 
"sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" 
+ } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": 
"sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@lukeed/csprng": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lukeed/csprng/-/csprng-1.1.0.tgz", + "integrity": "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@lukeed/uuid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@lukeed/uuid/-/uuid-2.0.1.tgz", + "integrity": "sha512-qC72D4+CDdjGqJvkFMMEAtancHUQ7/d/tAiHf64z8MopFDmcrtbcJuerDtFceuAfQJ2pDSfCKCtbqoGBNnwg0w==", + "dependencies": { + "@lukeed/csprng": "^1.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@niivue/niimath": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@niivue/niimath/-/niimath-0.1.1.tgz", + "integrity": "sha512-MI2+cST2VmTWja7Yws67Nq9I7+7xOZDCXFwOjMw5sOGTa5iR5tF09py2AxnmplIK3syR+kyIzkOLq6xx8vbCpA==", + "license": "BSD-2-Clause" + }, + "node_modules/@niivue/niivue": { + "version": "0.44.2", + "resolved": "https://registry.npmjs.org/@niivue/niivue/-/niivue-0.44.2.tgz", + "integrity": "sha512-ddShcApln/xJQa1dIo7DPpcRPoabBA6n7Wy8gtwA1S6srhZn2PdaPep1VEqdS82ufbFier0SRFE4lVzt7/ZDKw==", + "license": "BSD-2-Clause", + "dependencies": { + "@lukeed/uuid": "^2.0.1", + "@ungap/structured-clone": "^1.2.0", + "array-equal": "^1.0.2", + "daikon": "^1.2.46", + "fflate": "^0.8.2", + "gl-matrix": "^3.4.3", + "nifti-reader-js": "^0.6.8", + "rxjs": "^7.8.1" + }, + "optionalDependencies": { + "@rollup/rollup-linux-x64-gnu": "^4.18.1" + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.21.0.tgz", + "integrity": "sha512-WTWD8PfoSAJ+qL87lE7votj3syLavxunWhzCnx3XFxFiI/BA/r3X7MUM8dVrH8rb2r4AiO8jJsr3ZjdaftmnfA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.21.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.21.0.tgz", + "integrity": "sha512-a1sR2zSK1B4eYkiZu17ZUZhmUQcKjk2/j9Me2IDjk1GHW7LB5Z35LEzj9iJch6gtUfsnvZs1ZNyDW2oZSThrkA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.21.0.tgz", + "integrity": "sha512-zOnKWLgDld/svhKO5PD9ozmL6roy5OQ5T4ThvdYZLpiOhEGY+dp2NwUmxK0Ld91LrbjrvtNAE0ERBwjqhZTRAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.21.0.tgz", + "integrity": "sha512-7doS8br0xAkg48SKE2QNtMSFPFUlRdw9+votl27MvT46vo44ATBmdZdGysOevNELmZlfd+NEa0UYOA8f01WSrg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.21.0.tgz", + "integrity": "sha512-pWJsfQjNWNGsoCq53KjMtwdJDmh/6NubwQcz52aEwLEuvx08bzcy6tOUuawAOncPnxz/3siRtd8hiQ32G1y8VA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.21.0.tgz", + "integrity": "sha512-efRIANsz3UHZrnZXuEvxS9LoCOWMGD1rweciD6uJQIx2myN3a8Im1FafZBzh7zk1RJ6oKcR16dU3UPldaKd83w==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.21.0.tgz", + "integrity": "sha512-ZrPhydkTVhyeGTW94WJ8pnl1uroqVHM3j3hjdquwAcWnmivjAwOYjTEAuEDeJvGX7xv3Z9GAvrBkEzCgHq9U1w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.21.0.tgz", + "integrity": "sha512-cfaupqd+UEFeURmqNP2eEvXqgbSox/LHOyN9/d2pSdV8xTrjdg3NgOFJCtc1vQ/jEke1qD0IejbBfxleBPHnPw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.21.0.tgz", + "integrity": "sha512-ZKPan1/RvAhrUylwBXC9t7B2hXdpb/ufeu22pG2psV7RN8roOfGurEghw1ySmX/CmDDHNTDDjY3lo9hRlgtaHg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.21.0.tgz", + "integrity": "sha512-H1eRaCwd5E8eS8leiS+o/NqMdljkcb1d6r2h4fKSsCXQilLKArq6WS7XBLDu80Yz+nMqHVFDquwcVrQmGr28rg==", + "cpu": [ + "riscv64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.21.0.tgz", + "integrity": "sha512-zJ4hA+3b5tu8u7L58CCSI0A9N1vkfwPhWd/puGXwtZlsB5bTkwDNW/+JCU84+3QYmKpLi+XvHdmrlwUwDA6kqw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.21.0.tgz", + "integrity": "sha512-e2hrvElFIh6kW/UNBQK/kzqMNY5mO+67YtEh9OA65RM5IJXYTWiXjX6fjIiPaqOkBthYF1EqgiZ6OXKcQsM0hg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.21.0.tgz", + "integrity": "sha512-1vvmgDdUSebVGXWX2lIcgRebqfQSff0hMEkLJyakQ9JQUbLDkEaMsPTLOmyccyC6IJ/l3FZuJbmrBw/u0A0uCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.21.0.tgz", + "integrity": "sha512-s5oFkZ/hFcrlAyBTONFY1TWndfyre1wOMwU+6KCpm/iatybvrRgmZVM+vCFwxmC5ZhdlgfE0N4XorsDpi7/4XQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.21.0.tgz", + "integrity": "sha512-G9+TEqRnAA6nbpqyUqgTiopmnfgnMkR3kMukFBDsiyy23LZvUCpiUwjTRx6ezYCjJODXrh52rBR9oXvm+Fp5wg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.21.0.tgz", + "integrity": "sha512-2jsCDZwtQvRhejHLfZ1JY6w6kEuEtfF9nzYsZxzSlNVKDX+DpsDJ+Rbjkm74nvg2rdx0gwBS+IMdvwJuq3S9pQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@tensorflow/tfjs": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs/-/tfjs-4.19.0.tgz", + "integrity": "sha512-d2A1lTc6my7GJ5LwqzXa+igJ5+18exwsnaphZ3roi5nJ197uwxVSMIc2vSJnqZz1KajC5/mZgQr67EZrpTFlBg==", + "dependencies": { + "@tensorflow/tfjs-backend-cpu": "4.19.0", + "@tensorflow/tfjs-backend-webgl": "4.19.0", + "@tensorflow/tfjs-converter": "4.19.0", + "@tensorflow/tfjs-core": "4.19.0", + "@tensorflow/tfjs-data": "4.19.0", + "@tensorflow/tfjs-layers": "4.19.0", + "argparse": "^1.0.10", + "chalk": "^4.1.0", + "core-js": "3.29.1", + "regenerator-runtime": "^0.13.5", + "yargs": "^16.0.3" + }, + "bin": { + "tfjs-custom-module": "dist/tools/custom_module/cli.js" + } + }, + "node_modules/@tensorflow/tfjs-backend-cpu": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-backend-cpu/-/tfjs-backend-cpu-4.19.0.tgz", + "integrity": "sha512-7pT05Ea6GTXjbqRgkmayZRYvaiNl3LLk1TyfUvC8iIqMw5d7p4Wgte2pfM2gMbIZ/opOxURhFYuI0FiQvUrW6g==", + "dependencies": { + "@types/seedrandom": 
"^2.4.28", + "seedrandom": "^3.0.5" + }, + "engines": { + "yarn": ">= 1.3.2" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.19.0" + } + }, + "node_modules/@tensorflow/tfjs-backend-webgl": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-backend-webgl/-/tfjs-backend-webgl-4.19.0.tgz", + "integrity": "sha512-R0DC1W65lqTOccCwxMhH+VOKCgSrhd9GEejIIGhjeXt6oZlACFnOx4SuUr/qKLCDsL5I4E9iFLxAJMmsfYvARw==", + "dependencies": { + "@tensorflow/tfjs-backend-cpu": "4.19.0", + "@types/offscreencanvas": "~2019.3.0", + "@types/seedrandom": "^2.4.28", + "seedrandom": "^3.0.5" + }, + "engines": { + "yarn": ">= 1.3.2" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.19.0" + } + }, + "node_modules/@tensorflow/tfjs-converter": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-converter/-/tfjs-converter-4.19.0.tgz", + "integrity": "sha512-xIOE6enaVHPYCXKpHxJnUlN8hzlcQkgFSymHjBmdDnNCresuRwBGz4dqYAQMeQG21Ei3lxCQFdDDH7aSvUEAPw==", + "peerDependencies": { + "@tensorflow/tfjs-core": "4.19.0" + } + }, + "node_modules/@tensorflow/tfjs-core": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-core/-/tfjs-core-4.19.0.tgz", + "integrity": "sha512-GZ0d53PG0HGQCC7hbWv1qDnZctHYe/cafHZrBY5eNeQjQE6fBr3NsR5GfLadT0TELwmX9/nyritGDzvy6xmzHQ==", + "dependencies": { + "@types/long": "^4.0.1", + "@types/offscreencanvas": "~2019.7.0", + "@types/seedrandom": "^2.4.28", + "@webgpu/types": "0.1.38", + "long": "4.0.0", + "node-fetch": "~2.6.1", + "seedrandom": "^3.0.5" + }, + "engines": { + "yarn": ">= 1.3.2" + } + }, + "node_modules/@tensorflow/tfjs-core/node_modules/@types/offscreencanvas": { + "version": "2019.7.3", + "resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.7.3.tgz", + "integrity": "sha512-ieXiYmgSRXUDeOntE1InxjWyvEelZGP63M+cGuquuRLuIKKT1osnkXjxev9B7d1nXSug5vpunx+gNlbVxMlC9A==" + }, + "node_modules/@tensorflow/tfjs-data": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-data/-/tfjs-data-4.19.0.tgz", + "integrity": "sha512-n0ZgJp5UhhBatohUt9pXSCCApusK+1Flyk6yDrQYuxOTjhRppd6jYrF7LCDG3hMFi3QLGl0jab1zYrn9BwtC/w==", + "dependencies": { + "@types/node-fetch": "^2.1.2", + "node-fetch": "~2.6.1", + "string_decoder": "^1.3.0" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.19.0", + "seedrandom": "^3.0.5" + } + }, + "node_modules/@tensorflow/tfjs-layers": { + "version": "4.19.0", + "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-layers/-/tfjs-layers-4.19.0.tgz", + "integrity": "sha512-NufvuRaZdIyoG+R13d7oL8G5Bywox+ihPMiMZ3tWU+me8C8Y0pVC69mrnhOS9R8an7GDxKKSTTNEZhUvPvMGiQ==", + "peerDependencies": { + "@tensorflow/tfjs-core": "4.19.0" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/long": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz", + "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==" + }, + "node_modules/@types/node": { + "version": "20.12.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.7.tgz", + "integrity": "sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==", + "dependencies": { + 
"undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-fetch": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, + "node_modules/@types/offscreencanvas": { + "version": "2019.3.0", + "resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.3.0.tgz", + "integrity": "sha512-esIJx9bQg+QYF0ra8GnvfianIY8qWB0GBx54PK5Eps6m+xTj86KLavHv6qDhzKcu5UUOgNfJ2pWaIIV7TRUd9Q==" + }, + "node_modules/@types/seedrandom": { + "version": "2.4.34", + "resolved": "https://registry.npmjs.org/@types/seedrandom/-/seedrandom-2.4.34.tgz", + "integrity": "sha512-ytDiArvrn/3Xk6/vtylys5tlY6eo7Ane0hvcx++TKo6RxQXuVfW0AF/oeWqAj9dN29SyhtawuXstgmPlwNcv/A==" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, + "node_modules/@wearemothership/dicom-character-set": { + "version": "1.0.4-opt.1", + "resolved": "https://registry.npmjs.org/@wearemothership/dicom-character-set/-/dicom-character-set-1.0.4-opt.1.tgz", + "integrity": "sha512-stqhnpawYHY2UZKj4RHTF71ab3q3z8S1SO9ToQKjsHQwowUdFVo6YFea93psFux3yqNbRlQjwoCdPjHcD0YQzw==", + "engines": { + "node": ">=10" + } + }, + "node_modules/@webgpu/types": { + "version": "0.1.38", + "resolved": "https://registry.npmjs.org/@webgpu/types/-/types-0.1.38.tgz", + "integrity": "sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==" + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/array-equal": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-equal/-/array-equal-1.0.2.tgz", + "integrity": "sha512-gUHx76KtnhEgB3HOuFYiCm3FIdEs6ocM2asHvNTkfu/Y09qQVrrVVaOKENmS2KkSaGoxgXNqC+ZVtR/n0MOkSA==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/core-js": { + "version": "3.29.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.29.1.tgz", + "integrity": "sha512-+jwgnhg6cQxKYIIjGtAHq2nwUOolo9eoFZ4sHfUH09BLXBgxnH4gA0zEd+t+BO2cNB8idaBtZFcFTRjQJRJmAw==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/cssfilter": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/cssfilter/-/cssfilter-0.0.10.tgz", + "integrity": "sha512-FAaLDaplstoRsDR8XGYH51znUN0UY7nMc6Z9/fvE8EXGwvJE9hu7W2vHwx1+bd6gCYnln9nLbzxFTrcO9YQDZw==" + }, + "node_modules/daikon": { + "version": "1.2.46", + "resolved": "https://registry.npmjs.org/daikon/-/daikon-1.2.46.tgz", + "integrity": "sha512-S8dTTlsWYTH3LQztjTW9KnNvxDeL2mr2cau0auLdYMJe4TrocYP1PmidHizO3rXUs+gXpBWI1PQ2qvB4b21QFw==", + "dependencies": { + "@wearemothership/dicom-character-set": "^1.0.4-opt.1", + "fflate": "*", + "jpeg-lossless-decoder-js": "2.0.7", + "pako": "^2.1", + "xss": "1.0.14" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": 
"sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/fflate": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", + "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==" + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/gl-matrix": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/gl-matrix/-/gl-matrix-3.4.3.tgz", + "integrity": "sha512-wcCp8vu8FT22BnvKVPjXa/ICBWRq/zjFfdofZy1WSpQZpphblv12/bOQLBC1rMM7SGOFS9ltVmKOHil5+Ml7gA==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + 
"engines": { + "node": ">=8" + } + }, + "node_modules/jpeg-lossless-decoder-js": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/jpeg-lossless-decoder-js/-/jpeg-lossless-decoder-js-2.0.7.tgz", + "integrity": "sha512-tbZlhFkKmx+JaqVMkq47SKWGuXLkIaV8fTbnhO39dYEnQrSShLGuLCGb0n6ntXjtmk6oAWGiIriWOLwj9od0yQ==" + }, + "node_modules/long": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", + "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/nifti-reader-js": { + "version": "0.6.8", + "resolved": "https://registry.npmjs.org/nifti-reader-js/-/nifti-reader-js-0.6.8.tgz", + "integrity": "sha512-yIKNVzYFiUcSHazoR+sd6Ka7sUmZTabaVqJRFxbdlAKR1hnPBuNP71g3AyApo37nJ3k41c632QPij5q7gF1YPQ==", + "dependencies": { + "fflate": "*" + } + }, + "node_modules/node-fetch": { + "version": "2.6.13", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.13.tgz", + "integrity": "sha512-StxNAxh15zr77QvvkmveSQ8uCQ4+v5FkvNTj0OESmiHu+VRi/gXArXtkWMElOsOUNLtUEvI4yS+rdtOHZTwlQA==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/pako": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pako/-/pako-2.1.0.tgz", + "integrity": "sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug==" + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "dev": true, + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.4.41", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.41.tgz", + "integrity": "sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.1", + 
"source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.21.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.21.0.tgz", + "integrity": "sha512-vo+S/lfA2lMS7rZ2Qoubi6I5hwZwzXeUIctILZLbHI+laNtvhhOIon2S1JksA5UEDQ7l3vberd0fxK44lTYjbQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.21.0", + "@rollup/rollup-android-arm64": "4.21.0", + "@rollup/rollup-darwin-arm64": "4.21.0", + "@rollup/rollup-darwin-x64": "4.21.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.21.0", + "@rollup/rollup-linux-arm-musleabihf": "4.21.0", + "@rollup/rollup-linux-arm64-gnu": "4.21.0", + "@rollup/rollup-linux-arm64-musl": "4.21.0", + "@rollup/rollup-linux-powerpc64le-gnu": "4.21.0", + "@rollup/rollup-linux-riscv64-gnu": "4.21.0", + "@rollup/rollup-linux-s390x-gnu": "4.21.0", + "@rollup/rollup-linux-x64-gnu": "4.21.0", + "@rollup/rollup-linux-x64-musl": "4.21.0", + "@rollup/rollup-win32-arm64-msvc": "4.21.0", + "@rollup/rollup-win32-ia32-msvc": "4.21.0", + "@rollup/rollup-win32-x64-msvc": "4.21.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/seedrandom": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-3.0.5.tgz", + "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==" + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/string_decoder": { + 
"version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/vite": { + "version": "5.4.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.2.tgz", + "integrity": "sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.41", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + 
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/xss": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/xss/-/xss-1.0.14.tgz", + "integrity": "sha512-og7TEJhXvn1a7kzZGQ7ETjdQVS2UfZyTlsEdDOqvQF7GoxNfY+0YLCzBy1kPdsDDx4QuNAonQPddpsn6Xl/7sw==", + "dependencies": { + "commander": "^2.20.3", + "cssfilter": "0.0.10" + }, + "bin": { + "xss": "bin/xss" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "engines": { + "node": ">=10" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..3a21cad --- /dev/null +++ b/package.json @@ -0,0 +1,20 @@ +{ + "name": "niivue-brainchop", + "private": true, + "version": "0.1.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "@niivue/niimath": "^0.1.1", + "@niivue/niivue": "^0.44.2", + "@tensorflow/tfjs": "^4.19.0", + "gl-matrix": "^3.4.3" + }, + "devDependencies": { + "vite": "^5.4.2" + } +} diff --git a/public/favicon.ico b/public/favicon.ico new file mode 100644 index 0000000..de06861 Binary files /dev/null and b/public/favicon.ico differ diff --git a/public/models/GT/labels.json b/public/models/GT/labels.json new file mode 100644 index 0000000..35ee231 --- /dev/null +++ b/public/models/GT/labels.json @@ -0,0 +1 @@ +{"0": "background", "1": "Grey Matter", "2": "White Matter"} diff --git a/public/models/mnm_tfjs_me_test/colorLUT.json b/public/models/mnm_tfjs_me_test/colorLUT.json new file mode 100644 index 0000000..6da374a --- /dev/null +++ b/public/models/mnm_tfjs_me_test/colorLUT.json @@ -0,0 +1 @@ +{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/mnm_tfjs_me_test/group1-shard1of1.bin b/public/models/mnm_tfjs_me_test/group1-shard1of1.bin new file mode 100644 index 
0000000..210906a Binary files /dev/null and b/public/models/mnm_tfjs_me_test/group1-shard1of1.bin differ diff --git a/public/models/mnm_tfjs_me_test/labels.json b/public/models/mnm_tfjs_me_test/labels.json new file mode 100644 index 0000000..4885a94 --- /dev/null +++ b/public/models/mnm_tfjs_me_test/labels.json @@ -0,0 +1 @@ +{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/mnm_tfjs_me_test/model.json b/public/models/mnm_tfjs_me_test/model.json new file mode 100644 index 0000000..e288b78 --- /dev/null +++ b/public/models/mnm_tfjs_me_test/model.json @@ -0,0 +1 @@ +{"format": "layers-model", "generatedBy": "keras v2.4.0", "convertedBy": "TensorFlow.js Converter v3.2.0", "modelTopology": {"keras_version": "2.4.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 38, 38, 38, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "17", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "17", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "18", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "18", "inbound_nodes": [[["17", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["18", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": 
"channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["30", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 
0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "17/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "17/bias", "shape": [21], "dtype": "float32"}, {"name": "19/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model11_50class/colorLUT.json b/public/models/model11_50class/colorLUT.json new file mode 100644 index 0000000..99c0f7e --- /dev/null +++ b/public/models/model11_50class/colorLUT.json @@ -0,0 +1,52 @@ +{ + "0": "rgb(0,0,0)", + "1": "rgb(245,245,245)", + "2": "rgb(196,58,250)", + "3": "rgb(220,248,164)", + "4": "rgb(230,148,34)", + "5": "rgb(0,118,14)", + "6": "rgb(122,186,220)", + "7": "rgb(236,13,176)", + "8": "rgb(12,48,255)", + "9": "rgb(119,159,176)", + "10": "rgb(220,216,20)", + "11": "rgb(103,255,255)", + "12": "rgb(60,60,60)", + "13": "rgb(255,165,0)", + "14": "rgb(165,42,42)", + "15": "rgb(0,0,208)", + "16": "rgb(25,100,40)", + "17": "rgb(125,100,160)", + "18": "rgb(100,25,0)", + "19": "rgb(220,20,100)", + "20": "rgb(220,20,10)", + "21": "rgb(180,220,140)", + "22": "rgb(220,60,220)", + "23": "rgb(180,40,120)", + "24": "rgb(140,20,140)", + "25": "rgb(20,30,140)", + "26": "rgb(35,75,50)", + "27": "rgb(225,140,140)", + "28": "rgb(200,35,75)", + "29": "rgb(160,100,50)", + "30": "rgb(20,220,60)", + "31": "rgb(60,220,60)", + "32": "rgb(220,180,140)", + "33": "rgb(20,100,50)", + "34": "rgb(220,60,20)", + "35": "rgb(120,100,60)", + "36": "rgb(220,20,20)", + "37": "rgb(220,180,220)", + "38": "rgb(60,20,220)", + "39": "rgb(160,140,180)", + "40": "rgb(80,20,140)", + "41": "rgb(75,50,125)", + "42": "rgb(20,220,160)", + "43": "rgb(20,180,140)", + "44": "rgb(140,220,220)", + "45": "rgb(80,160,20)", + "46": "rgb(100,0,100)", + "47": "rgb(70,70,70)", + "48": "rgb(150,150,200)", + "49": "rgb(255,192,32)" +} \ No newline at end of file diff --git a/public/models/model11_50class/group1-shard1of1.bin b/public/models/model11_50class/group1-shard1of1.bin new file mode 100644 index 0000000..216fc84 Binary files /dev/null and b/public/models/model11_50class/group1-shard1of1.bin differ diff --git a/public/models/model11_50class/labels.json b/public/models/model11_50class/labels.json new file mode 100644 index 0000000..58541ce --- /dev/null +++ b/public/models/model11_50class/labels.json @@ -0,0 +1,52 @@ +{ + "0": "BG", + "1": "Cerebral-White-Matter", + "2": "Ventricle", + "3": "Cerebellum-White-Matter", + "4": "Cerebellum", + "5": "Thalamus-Proper*", + "6": "Caudate", + "7": "Putamen", + "8": "Pallidum", + "9": "Brain-Stem", + "10": "Hippocampus", + "11": "Amygdala", + "12": "CSF", + "13": "Accumbens-area", + "14": "VentralDC", + "15": "CC_Posterior / CC_Mid_Posterior / 
CC_Central / CC_Mid_Anterior / CC_Anterior", + "16": "ctx-bankssts", + "17": "ctx-caudalanteriorcingulate", + "18": "ctx-caudalmiddlefrontal", + "19": "ctx-cuneus", + "20": "ctx-entorhinal", + "21": "ctx-fusiform", + "22": "ctx-inferiorparietal", + "23": "ctx-inferiortemporal", + "24": "ctx-isthmuscingulate", + "25": "ctx-lateraloccipital", + "26": "ctx-lateralorbitofrontal", + "27": "ctx-lingual", + "28": "ctx-medialorbitofrontal", + "29": "ctx-middletemporal", + "30": "ctx-parahippocampal", + "31": "ctx-paracentral", + "32": "ctx-parsopercularis", + "33": "ctx-parsorbitalis", + "34": "ctx-parstriangularis", + "35": "ctx-pericalcarine", + "36": "ctx-postcentral", + "37": "ctx-posteriorcingulate", + "38": "ctx-precentral", + "39": "ctx-precuneus", + "40": "ctx-rostralanteriorcingulate", + "41": "ctx-rostralmiddlefrontal", + "42": "ctx-superiorfrontal", + "43": "ctx-superiorparietal", + "44": "ctx-superiortemporal", + "45": "ctx-supramarginal", + "46": "ctx-frontalpole", + "47": "ctx-temporalpole", + "48": "ctx-transversetemporal", + "49": "ctx-insula" +} \ No newline at end of file diff --git a/public/models/model11_50class/labels.zip b/public/models/model11_50class/labels.zip new file mode 100644 index 0000000..824f07b Binary files /dev/null and b/public/models/model11_50class/labels.zip differ diff --git a/public/models/model11_50class/labelsWithCompleteAnnot.json b/public/models/model11_50class/labelsWithCompleteAnnot.json new file mode 100644 index 0000000..8735db8 --- /dev/null +++ b/public/models/model11_50class/labelsWithCompleteAnnot.json @@ -0,0 +1,52 @@ +{ + "0": "BG", + "1": "Left-Cerebral-White-Matter / Right-Cerebral-White-Matter", + "2": "Left-Lateral-Ventricle / Left-Inf-Lat-Vent / Right-Lateral-Ventricle / Right-Inf-Lat-Vent / 3rd-Ventricle / 4th-Ventricle", + "3": "Left-Cerebellum-White-Matter / Right-Cerebellum-White-Matter", + "4": "Left-Cerebellum-Cortex / Right-Cerebellum-Cortex", + "5": "Left-Thalamus-Proper* / Right-Thalamus-Proper*", + "6": "Left-Caudate / Right-Caudate", + "7": "Left-Putamen / Right-Putamen", + "8": "Left-Pallidum / Right-Pallidum", + "9": "Brain-Stem", + "10": "Left-Hippocampus / Right-Hippocampus", + "11": "Left-Amygdala / Right-Amygdala", + "12": "CSF", + "13": "Left-Accumbens-area / Right-Accumbens-area", + "14": "Left-VentralDC / Right-VentralDC", + "15": "CC_Posterior / CC_Mid_Posterior / CC_Central / CC_Mid_Anterior / CC_Anterior", + "16": "ctx-lh-bankssts / ctx-rh-bankssts", + "17": "ctx-lh-caudalanteriorcingulate / ctx-rh-caudalanteriorcingulate", + "18": "ctx-lh-caudalmiddlefrontal / ctx-rh-caudalmiddlefrontal", + "19": "ctx-lh-cuneus / ctx-rh-cuneus", + "20": "ctx-lh-entorhinal / ctx-rh-entorhinal", + "21": "ctx-lh-fusiform / ctx-rh-fusiform", + "22": "ctx-lh-inferiorparietal / ctx-rh-inferiorparietal", + "23": "ctx-lh-inferiortemporal / ctx-rh-inferiortemporal", + "24": "ctx-lh-isthmuscingulate / ctx-rh-isthmuscingulate", + "25": "ctx-lh-lateraloccipital / ctx-rh-lateraloccipital", + "26": "ctx-lh-lateralorbitofrontal / ctx-rh-lateralorbitofrontal", + "27": "ctx-lh-lingual / ctx-rh-lingual", + "28": "ctx-lh-medialorbitofrontal / ctx-rh-medialorbitofrontal", + "29": "ctx-lh-middletemporal / ctx-rh-middletemporal", + "30": "ctx-lh-parahippocampal / ctx-rh-parahippocampal", + "31": "ctx-lh-paracentral / ctx-rh-paracentral", + "32": "ctx-lh-parsopercularis / ctx-rh-parsopercularis", + "33": "ctx-lh-parsorbitalis / ctx-rh-parsorbitalis", + "34": "ctx-lh-parstriangularis / ctx-rh-parstriangularis", + "35": "ctx-lh-pericalcarine / 
ctx-rh-pericalcarine", + "36": "ctx-lh-postcentral / ctx-rh-postcentral", + "37": "ctx-lh-posteriorcingulate / ctx-rh-posteriorcingulate", + "38": "ctx-lh-precentral / ctx-rh-precentral", + "39": "ctx-lh-precuneus / ctx-rh-precuneus", + "40": "ctx-lh-rostralanteriorcingulate / ctx-rh-rostralanteriorcingulate", + "41": "ctx-lh-rostralmiddlefrontal / ctx-rh-rostralmiddlefrontal", + "42": "ctx-lh-superiorfrontal / ctx-rh-superiorfrontal", + "43": "ctx-lh-superiorparietal / ctx-rh-superiorparietal", + "44": "ctx-lh-superiortemporal / ctx-rh-superiortemporal", + "45": "ctx-lh-supramarginal / ctx-rh-supramarginal", + "46": "ctx-lh-frontalpole / ctx-rh-frontalpole", + "47": "ctx-lh-temporalpole / ctx-rh-temporalpole", + "48": "ctx-lh-transversetemporal / ctx-rh-transversetemporal", + "49": "ctx-lh-insula / ctx-rh-insula" +} \ No newline at end of file diff --git a/public/models/model11_50class/model.json b/public/models/model11_50class/model.json new file mode 100644 index 0000000..37f3f21 --- /dev/null +++ b/public/models/model11_50class/model.json @@ -0,0 +1 @@ +{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "input.1", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.1", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["input.1", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.4", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.4", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["input.4", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.8", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, 
"bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.8", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["input.8", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.12", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.12", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["input.12", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.16", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.16", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["input.16", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.20", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.20", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["input.20", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.24", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.24", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["input.24", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.28", "trainable": 
true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.28", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["input.28", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.32", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.32", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["input.32", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 50, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "input.1/kernel", "shape": [3, 3, 3, 1, 11], "dtype": "float32"}, {"name": "input.1/bias", "shape": [11], "dtype": "float32"}, {"name": "input.12/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.12/bias", "shape": [11], "dtype": "float32"}, {"name": "input.16/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.16/bias", "shape": [11], "dtype": "float32"}, {"name": "input.20/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.20/bias", "shape": [11], "dtype": "float32"}, {"name": "input.24/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.24/bias", "shape": [11], "dtype": "float32"}, {"name": "input.28/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.28/bias", "shape": [11], "dtype": "float32"}, {"name": "input.32/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.32/bias", "shape": [11], "dtype": "float32"}, {"name": "input.4/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.4/bias", "shape": [11], "dtype": "float32"}, {"name": "input.8/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.8/bias", "shape": [11], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 11, 50], 
"dtype": "float32"}, {"name": "output/bias", "shape": [50], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model11_gw_ae/colorLUT.json b/public/models/model11_gw_ae/colorLUT.json new file mode 100644 index 0000000..6da374a --- /dev/null +++ b/public/models/model11_gw_ae/colorLUT.json @@ -0,0 +1 @@ +{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model11_gw_ae/group1-shard1of1.bin b/public/models/model11_gw_ae/group1-shard1of1.bin new file mode 100644 index 0000000..4a52b2a Binary files /dev/null and b/public/models/model11_gw_ae/group1-shard1of1.bin differ diff --git a/public/models/model11_gw_ae/labels.json b/public/models/model11_gw_ae/labels.json new file mode 100644 index 0000000..4885a94 --- /dev/null +++ b/public/models/model11_gw_ae/labels.json @@ -0,0 +1 @@ +{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model11_gw_ae/model.json b/public/models/model11_gw_ae/model.json new file mode 100644 index 0000000..e10ce2b --- /dev/null +++ b/public/models/model11_gw_ae/model.json @@ -0,0 +1 @@ +{"_comment": "This model was train on 6000 T1 MRI images of the FreeSurfer labeled MRN data as available in MinfulTensors database Collection MRN for 10 epochs. Mongo database running on trendscn018.rs.gsu.edu . Then it was tuned on the HCP/hcp770 database for 5 epochs of HCP freesurfer data and subsequently trained for 5 epochs again on MRN collection. The final tuning was done on a single epoch of hcp770 with OneCycleLR and lr = 0.0002 ", "_model_location": "/home/users/splis/craft/meshnet/enmesh/logs11ae_gwm_after_hcp_plus1hcp/checkpoints/last.pth", "format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": 
"Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", 
"config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["34", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "19/kernel", "shape": [3, 3, 3, 1, 11], "dtype": "float32"}, {"name": "19/bias", "shape": [11], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "21/bias", "shape": [11], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "23/bias", "shape": [11], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "25/bias", "shape": [11], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "27/bias", "shape": [11], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "29/bias", "shape": [11], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "31/bias", "shape": [11], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "33/bias", "shape": [11], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 11, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]} diff --git a/public/models/model18cls/colorLUT.json b/public/models/model18cls/colorLUT.json new file mode 100644 index 0000000..27d12d1 --- /dev/null +++ b/public/models/model18cls/colorLUT.json @@ -0,0 +1,21 @@ +{ + "0": "rgb(0,0,0)", + "1": "rgb(245,245,245)", + "2": "rgb(205,62,78)", + "3": "rgb(120,18,134)", + "4": "rgb(196,58,250)", + "5": "rgb(220,248,164)", + "6": "rgb(230,148,34)", + "7": "rgb(0,118,14)", + "8": "rgb(122,186,220)", + "9": 
"rgb(236,13,176)", + "10": "rgb(12,48,255)", + "11": "rgb(204,182,142)", + "12": "rgb(42,204,164)", + "13": "rgb(119,159,176)", + "14": "rgb(220,216,20)", + "15": "rgb(103,255,255)", + "16": "rgb(255,165,0)", + "17": "rgb(165,42,42)" +} + diff --git a/public/models/model18cls/colormap.json b/public/models/model18cls/colormap.json new file mode 100644 index 0000000..ee61421 --- /dev/null +++ b/public/models/model18cls/colormap.json @@ -0,0 +1,6 @@ +{ + "R": [ 0, 245, 205, 120, 196, 220, 230, 0, 122, 236, 12, 204, 42, 119, 220, 103, 255, 165], + "G": [ 0, 245, 62, 18, 58, 248, 148, 118, 186, 13, 48, 182, 204, 159, 216, 255, 165, 42], + "B": [ 0, 245, 78, 134, 250, 164, 34, 14, 220, 176, 255, 142, 164, 176, 20, 255, 0, 42], + "labels": [ "Unknown", "Cerebral-White-Matter", "Cerebral-Cortex", "Lateral-Ventricle", "Inferior-Lateral-Ventricle", "Cerebellum-White-Matter", "Cerebellum-Cortex", "Thalamus", "Caudate", "Putamen", "Pallidum", "3rd-Ventricle", "4th-Ventricle", "Brain-Stem", "Hippocampus", "Amygdala", "Accumbens-area", "VentralDC"] +} \ No newline at end of file diff --git a/public/models/model18cls/labels.json b/public/models/model18cls/labels.json new file mode 100644 index 0000000..d022502 --- /dev/null +++ b/public/models/model18cls/labels.json @@ -0,0 +1,20 @@ +{ + "0": "Unknown", + "1": "Cerebral-White-Matter", + "2": "Cerebral-Cortex", + "3": "Lateral-Ventricle", + "4": "Inferior-Lateral-Ventricle", + "5": "Cerebellum-White-Matter", + "6": "Cerebellum-Cortex", + "7": "Thalamus", + "8": "Caudate", + "9": "Putamen", + "10": "Pallidum", + "11": "3rd-Ventricle", + "12": "4th-Ventricle", + "13": "Brain-Stem", + "14": "Hippocampus", + "15": "Amygdala", + "16": "Accumbens-area", + "17": "VentralDC" +} diff --git a/public/models/model18cls/model.bin b/public/models/model18cls/model.bin new file mode 100644 index 0000000..085ee98 Binary files /dev/null and b/public/models/model18cls/model.bin differ diff --git a/public/models/model18cls/model.json b/public/models/model18cls/model.json new file mode 100644 index 0000000..79f01fe --- /dev/null +++ b/public/models/model18cls/model.json @@ -0,0 +1,808 @@ +{ + "format": "layers-model", + "generatedBy": "keras v2.7.0", + "convertedBy": "TensorFlow.js Converter v3.9.0", + "modelTopology": { + "keras_version": "2.6.0", + "backend": "tensorflow", + "model_config": { + "class_name": "Functional", + "config": { + "name": "model", + "layers": [ + { + "class_name": "InputLayer", + "config": { + "batch_input_shape": [ + null, + 256, + 256, + 256, + 1 + ], + "dtype": "float32", + "sparse": false, + "ragged": false, + "name": "input" + }, + "name": "input", + "inbound_nodes": [] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_0", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_0", + "inbound_nodes": [ + [ + [ + "input", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_1", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_1", + "inbound_nodes": [ + [ + [ + "conv3d_0", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_2", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, 
+ 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_2", + "inbound_nodes": [ + [ + [ + "activation_1", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_3", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_3", + "inbound_nodes": [ + [ + [ + "conv3d_2", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_4", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_4", + "inbound_nodes": [ + [ + [ + "activation_3", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_5", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_5", + "inbound_nodes": [ + [ + [ + "conv3d_4", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_6", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_6", + "inbound_nodes": [ + [ + [ + "activation_5", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_7", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_7", + "inbound_nodes": [ + [ + [ + "conv3d_6", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_8", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 16, + 16, + 16 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_8", + "inbound_nodes": [ + [ + [ + "activation_7", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_9", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_9", + "inbound_nodes": [ + [ + [ + "conv3d_8", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_10", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_10", + "inbound_nodes": [ + [ + [ + "activation_9", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_11", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_11", + "inbound_nodes": [ + [ + [ + "conv3d_10", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_12", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": 
"linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_12", + "inbound_nodes": [ + [ + [ + "activation_11", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_13", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_13", + "inbound_nodes": [ + [ + [ + "conv3d_12", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_14", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_14", + "inbound_nodes": [ + [ + [ + "activation_13", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_15", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_15", + "inbound_nodes": [ + [ + [ + "conv3d_14", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_16", + "trainable": false, + "filters": 21, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_16", + "inbound_nodes": [ + [ + [ + "activation_15", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_17", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_17", + "inbound_nodes": [ + [ + [ + "conv3d_16", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "output", + "trainable": false, + "filters": 18, + "kernel_size": [ + 1, + 1, + 1 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "output", + "inbound_nodes": [ + [ + [ + "activation_17", + 0, + 0, + {} + ] + ] + ] + } + ], + "input_layers": [ + [ + "input", + 0, + 0 + ] + ], + "output_layers": [ + [ + "output", + 0, + 0 + ] + ] + } + } + }, + "weightsManifest": [ + { + "paths": [ + "model.bin" + ], + "weights": [ + { + "name": "conv3d_0/kernel", + "shape": [ + 3, + 3, + 3, + 1, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_0/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": 
"conv3d_12/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/kernel", + "shape": [ + 3, + 3, + 3, + 21, + 21 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/bias", + "shape": [ + 21 + ], + "dtype": "float32" + }, + { + "name": "output/kernel", + "shape": [ + 1, + 1, + 1, + 21, + 18 + ], + "dtype": "float32" + }, + { + "name": "output/bias", + "shape": [ + 18 + ], + "dtype": "float32" + } + ] + } + ] +} \ No newline at end of file diff --git a/public/models/model20chan3cls/colorLUT.json b/public/models/model20chan3cls/colorLUT.json new file mode 100644 index 0000000..6da374a --- /dev/null +++ b/public/models/model20chan3cls/colorLUT.json @@ -0,0 +1 @@ +{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model20chan3cls/colormap.json b/public/models/model20chan3cls/colormap.json new file mode 100644 index 0000000..2343358 --- /dev/null +++ b/public/models/model20chan3cls/colormap.json @@ -0,0 +1,6 @@ +{ + "R": [ 0, 255, 205], + "G": [ 0, 255, 62], + "B": [ 0, 255, 78], + "labels": [ "background", "White Matter", "Grey Matter"] +} \ No newline at end of file diff --git a/public/models/model20chan3cls/labels.json b/public/models/model20chan3cls/labels.json new file mode 100644 index 0000000..4885a94 --- /dev/null +++ b/public/models/model20chan3cls/labels.json @@ -0,0 +1 @@ +{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model20chan3cls/model.bin b/public/models/model20chan3cls/model.bin new file mode 100644 index 0000000..abbfc8e Binary files /dev/null and b/public/models/model20chan3cls/model.bin differ diff --git a/public/models/model20chan3cls/model.json b/public/models/model20chan3cls/model.json new file mode 100644 index 0000000..4ce0188 --- /dev/null +++ b/public/models/model20chan3cls/model.json @@ -0,0 +1,811 @@ +{ + "_comment": "Normalize the data for this model with min = 5% quantile, max = 95% quantile", + "_model_location": "~/craft/meshnet/enmesh2/logs/tmp/curriculum_enmesh_20channels_gwm/model.last.pth", + "_wandb": "https://wandb.ai/neuroneural/curriculum_20_gwm", + "format": "layers-model", + "generatedBy": "keras v2.7.0", + "convertedBy": "TensorFlow.js Converter v3.9.0", + "modelTopology": { + "keras_version": "2.6.0", + "backend": "tensorflow", + "model_config": { + "class_name": "Functional", + "config": { + "name": "model", + "layers": [ + { + "class_name": "InputLayer", + "config": { + "batch_input_shape": [ + null, + 256, + 256, + 256, + 1 + ], + "dtype": "float32", + "sparse": false, + "ragged": false, + "name": "input" + }, + "name": "input", + "inbound_nodes": [] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_0", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_0", + "inbound_nodes": [ + [ + [ + "input", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_1", + "trainable": false, + "dtype": "float32", + "activation": "elu" 
+ }, + "name": "activation_1", + "inbound_nodes": [ + [ + [ + "conv3d_0", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_2", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_2", + "inbound_nodes": [ + [ + [ + "activation_1", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_3", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_3", + "inbound_nodes": [ + [ + [ + "conv3d_2", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_4", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_4", + "inbound_nodes": [ + [ + [ + "activation_3", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_5", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_5", + "inbound_nodes": [ + [ + [ + "conv3d_4", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_6", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_6", + "inbound_nodes": [ + [ + [ + "activation_5", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_7", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_7", + "inbound_nodes": [ + [ + [ + "conv3d_6", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_8", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 16, + 16, + 16 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_8", + "inbound_nodes": [ + [ + [ + "activation_7", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_9", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_9", + "inbound_nodes": [ + [ + [ + "conv3d_8", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_10", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_10", + "inbound_nodes": [ + [ + [ + "activation_9", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_11", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_11", + "inbound_nodes": [ + [ + [ + "conv3d_10", + 0, + 0, + {} + 
] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_12", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_12", + "inbound_nodes": [ + [ + [ + "activation_11", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_13", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_13", + "inbound_nodes": [ + [ + [ + "conv3d_12", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_14", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_14", + "inbound_nodes": [ + [ + [ + "activation_13", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_15", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_15", + "inbound_nodes": [ + [ + [ + "conv3d_14", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_16", + "trainable": false, + "filters": 20, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_16", + "inbound_nodes": [ + [ + [ + "activation_15", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_17", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_17", + "inbound_nodes": [ + [ + [ + "conv3d_16", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "output", + "trainable": false, + "filters": 3, + "kernel_size": [ + 1, + 1, + 1 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "output", + "inbound_nodes": [ + [ + [ + "activation_17", + 0, + 0, + {} + ] + ] + ] + } + ], + "input_layers": [ + [ + "input", + 0, + 0 + ] + ], + "output_layers": [ + [ + "output", + 0, + 0 + ] + ] + } + } + }, + "weightsManifest": [ + { + "paths": [ + "model.bin" + ], + "weights": [ + { + "name": "conv3d_0/kernel", + "shape": [ + 3, + 3, + 3, + 1, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_0/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + 
"dtype": "float32" + }, + { + "name": "conv3d_8/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/kernel", + "shape": [ + 3, + 3, + 3, + 20, + 20 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/bias", + "shape": [ + 20 + ], + "dtype": "float32" + }, + { + "name": "output/kernel", + "shape": [ + 1, + 1, + 1, + 20, + 3 + ], + "dtype": "float32" + }, + { + "name": "output/bias", + "shape": [ + 3 + ], + "dtype": "float32" + } + ] + } + ] +} diff --git a/public/models/model21_104class/colorLUT.json b/public/models/model21_104class/colorLUT.json new file mode 100644 index 0000000..b321ef3 --- /dev/null +++ b/public/models/model21_104class/colorLUT.json @@ -0,0 +1,106 @@ +{ + "0": "rgb(0,0,0)", + "1": "rgb(25,100,40)", + "2": "rgb(125,100,160)", + "3": "rgb(100,25,0)", + "4": "rgb(220,20,100)", + "5": "rgb(220,20,10)", + "6": "rgb(180,220,140)", + "7": "rgb(220,60,220)", + "8": "rgb(180,40,120)", + "9": "rgb(140,20,140)", + "10": "rgb(20,30,140)", + "11": "rgb(35,75,50)", + "12": "rgb(225,140,140)", + "13": "rgb(200,35,75)", + "14": "rgb(160,100,50)", + "15": "rgb(20,220,60)", + "16": "rgb(60,220,60)", + "17": "rgb(220,180,140)", + "18": "rgb(20,100,50)", + "19": "rgb(220,60,20)", + "20": "rgb(120,100,60)", + "21": "rgb(220,20,20)", + "22": "rgb(220,180,220)", + "23": "rgb(60,20,220)", + "24": "rgb(160,140,180)", + "25": "rgb(80,20,140)", + "26": "rgb(75,50,125)", + "27": "rgb(20,220,160)", + "28": "rgb(20,180,140)", + "29": "rgb(140,220,220)", + "30": "rgb(80,160,20)", + "31": "rgb(100,0,100)", + "32": "rgb(70,70,70)", + "33": "rgb(150,150,200)", + "34": "rgb(255,192,32)", + "35": "rgb(25,100,40)", + "36": "rgb(125,100,160)", + "37": "rgb(100,25,0)", + "38": "rgb(220,20,100)", + "39": "rgb(220,20,10)", + "40": "rgb(180,220,140)", + "41": "rgb(220,60,220)", + "42": "rgb(180,40,120)", + "43": "rgb(140,20,140)", + "44": "rgb(20,30,140)", + "45": "rgb(35,75,50)", + "46": "rgb(225,140,140)", + "47": "rgb(200,35,75)", + "48": "rgb(160,100,50)", + "49": "rgb(20,220,60)", + "50": "rgb(60,220,60)", + "51": "rgb(220,180,140)", + "52": "rgb(20,100,50)", + "53": "rgb(220,60,20)", + "54": "rgb(120,100,60)", + "55": "rgb(220,20,20)", + "56": "rgb(220,180,220)", + "57": "rgb(60,20,220)", + "58": "rgb(160,140,180)", + "59": "rgb(80,20,140)", + "60": "rgb(75,50,125)", + "61": "rgb(20,220,160)", + "62": "rgb(20,180,140)", + "63": "rgb(140,220,220)", + "64": "rgb(80,160,20)", + "65": "rgb(100,0,100)", + "66": "rgb(70,70,70)", + "67": "rgb(150,150,200)", + "68": "rgb(255,192,32)", + "69": "rgb(0,118,14)", + "70": "rgb(0,118,14)", + "71": "rgb(122,186,220)", + "72": "rgb(122,186,220)", + "73": "rgb(236,13,176)", + "74": "rgb(236,13,176)", + "75": "rgb(12,48,255)", + "76": "rgb(13,48,255)", + "77": "rgb(220,216,20)", + "78": "rgb(220,216,20)", + "79": "rgb(103,255,255)", + "80": "rgb(103,255,255)", + "81": "rgb(255,165,0)", + "82": "rgb(255,165,0)", + "83": "rgb(165,42,42)", + "84": "rgb(165,42,42)", + "85": "rgb(245,245,245)", + 
"86": "rgb(245,245,245)", + "87": "rgb(120,18,134)", + "88": "rgb(196,58,250)", + "89": "rgb(120,18,134)", + "90": "rgb(196,58,250)", + "91": "rgb(204,182,142)", + "92": "rgb(42,204,164)", + "93": "rgb(60,60,60)", + "94": "rgb(119,159,176)", + "95": "rgb(220,248,164)", + "96": "rgb(220,248,164)", + "97": "rgb(230,148,34)", + "98": "rgb(230,148,34)", + "99": "rgb(0,0,64)", + "100": "rgb(0,0,112)", + "101": "rgb(0,0,160)", + "102": "rgb(0,0,208)", + "103": "rgb(0,0,255)" +} \ No newline at end of file diff --git a/public/models/model21_104class/colormap.json b/public/models/model21_104class/colormap.json new file mode 100644 index 0000000..f728661 --- /dev/null +++ b/public/models/model21_104class/colormap.json @@ -0,0 +1,6 @@ +{ + "R": [ 0, 25, 125, 100, 220, 220, 180, 220, 180, 140, 20, 35, 225, 200, 160, 20, 60, 220, 20, 220, 120, 220, 220, 60, 160, 80, 75, 20, 20, 140, 80, 100, 70, 150, 255, 25, 125, 100, 220, 220, 180, 220, 180, 140, 20, 35, 225, 200, 160, 20, 60, 220, 20, 220, 120, 220, 220, 60, 160, 80, 75, 20, 20, 140, 80, 100, 70, 150, 255, 0, 0, 122, 122, 236, 236, 12, 13, 220, 220, 103, 103, 255, 255, 165, 165, 245, 245, 120, 196, 120, 196, 204, 42, 60, 119, 220, 220, 230, 230, 0, 0, 0, 0, 0], + "G": [ 0, 100, 100, 25, 20, 20, 220, 60, 40, 20, 30, 75, 140, 35, 100, 220, 220, 180, 100, 60, 100, 20, 180, 20, 140, 20, 50, 220, 180, 220, 160, 0, 70, 150, 192, 100, 100, 25, 20, 20, 220, 60, 40, 20, 30, 75, 140, 35, 100, 220, 220, 180, 100, 60, 100, 20, 180, 20, 140, 20, 50, 220, 180, 220, 160, 0, 70, 150, 192, 118, 118, 186, 186, 13, 13, 48, 48, 216, 216, 255, 255, 165, 165, 42, 42, 245, 245, 18, 58, 18, 58, 182, 204, 60, 159, 248, 248, 148, 148, 0, 0, 0, 0, 0], + "B": [ 0, 40, 160, 0, 100, 10, 140, 220, 120, 140, 140, 50, 140, 75, 50, 60, 60, 140, 50, 20, 60, 20, 220, 220, 180, 140, 125, 160, 140, 220, 20, 100, 70, 200, 32, 40, 160, 0, 100, 10, 140, 220, 120, 140, 140, 50, 140, 75, 50, 60, 60, 140, 50, 20, 60, 20, 220, 220, 180, 140, 125, 160, 140, 220, 20, 100, 70, 200, 32, 14, 14, 220, 220, 176, 176, 255, 255, 20, 20, 255, 255, 0, 0, 42, 42, 245, 245, 134, 250, 134, 250, 142, 164, 60, 176, 164, 164, 34, 34, 64, 112, 160, 208, 255], + "labels": [ "BG", "ctx-lh-bankssts", "ctx-lh-caudalanteriorcingulate", "ctx-lh-caudalmiddlefrontal", "ctx-lh-cuneus", "ctx-lh-entorhinal", "ctx-lh-fusiform", "ctx-lh-inferiorparietal", "ctx-lh-inferiortemporal", "ctx-lh-isthmuscingulate", "ctx-lh-lateraloccipital", "ctx-lh-lateralorbitofrontal", "ctx-lh-lingual", "ctx-lh-medialorbitofrontal", "ctx-lh-middletemporal", "ctx-lh-parahippocampal", "ctx-lh-paracentral", "ctx-lh-parsopercularis", "ctx-lh-parsorbitalis", "ctx-lh-parstriangularis", "ctx-lh-pericalcarine", "ctx-lh-postcentral", "ctx-lh-posteriorcingulate", "ctx-lh-precentral", "ctx-lh-precuneus", "ctx-lh-rostralanteriorcingulate", "ctx-lh-rostralmiddlefrontal", "ctx-lh-superiorfrontal", "ctx-lh-superiorparietal", "ctx-lh-superiortemporal", "ctx-lh-supramarginal", "ctx-lh-frontalpole", "ctx-lh-temporalpole", "ctx-lh-transversetemporal", "ctx-lh-insula", "ctx-rh-bankssts", "ctx-rh-caudalanteriorcingulate", "ctx-rh-caudalmiddlefrontal", "ctx-rh-cuneus", "ctx-rh-entorhinal", "ctx-rh-fusiform", "ctx-rh-inferiorparietal", "ctx-rh-inferiortemporal", "ctx-rh-isthmuscingulate", "ctx-rh-lateraloccipital", "ctx-rh-lateralorbitofrontal", "ctx-rh-lingual", "ctx-rh-medialorbitofrontal", "ctx-rh-middletemporal", "ctx-rh-parahippocampal", "ctx-rh-paracentral", "ctx-rh-parsopercularis", "ctx-rh-parsorbitalis", "ctx-rh-parstriangularis", "ctx-rh-pericalcarine", 
"ctx-rh-postcentral", "ctx-rh-posteriorcingulate", "ctx-rh-precentral", "ctx-rh-precuneus", "ctx-rh-rostralanteriorcingulate", "ctx-rh-rostralmiddlefrontal", "ctx-rh-superiorfrontal", "ctx-rh-superiorparietal", "ctx-rh-superiortemporal", "ctx-rh-supramarginal", "ctx-rh-frontalpole", "ctx-rh-temporalpole", "ctx-rh-transversetemporal", "ctx-rh-insula", "Left-Thalamus-Proper*", "Right-Thalamus-Proper*", "Left-Caudate", "Right-Caudate", "Left-Putamen", "Right-Putamen", "Left-Pallidum", "Right-Pallidum", "Left-Hippocampus", "Right-Hippocampus", "Left-Amygdala", "Right-Amygdala", "Left-Accumbens-area", "Right-Accumbens-area", "Left-VentralDC", "Right-VentralDC", "Left-Cerebral-White-Matter", "Right-Cerebral-White-Matter", "Left-Lateral-Ventricle", "Left-Inf-Lat-Vent", "Right-Lateral-Ventricle", "Right-Inf-Lat-Vent", "3rd-Ventricle", "4th-Ventricle", "CSF", "Brain-Stem", "Left-Cerebellum-White-Matter", "Right-Cerebellum-White-Matter", "Left-Cerebellum-Cortex", "Right-Cerebellum-Cortex", "CC_Posterior", "CC_Mid_Posterior", "CC_Central", "CC_Mid_Anterior", "CC_Anterior"] +} \ No newline at end of file diff --git a/public/models/model21_104class/group1-shard1of1.bin b/public/models/model21_104class/group1-shard1of1.bin new file mode 100644 index 0000000..da65df9 Binary files /dev/null and b/public/models/model21_104class/group1-shard1of1.bin differ diff --git a/public/models/model21_104class/labels.json b/public/models/model21_104class/labels.json new file mode 100644 index 0000000..1b69b32 --- /dev/null +++ b/public/models/model21_104class/labels.json @@ -0,0 +1,106 @@ +{ + "0": "BG", + "1": "ctx-lh-bankssts", + "2": "ctx-lh-caudalanteriorcingulate", + "3": "ctx-lh-caudalmiddlefrontal", + "4": "ctx-lh-cuneus", + "5": "ctx-lh-entorhinal", + "6": "ctx-lh-fusiform", + "7": "ctx-lh-inferiorparietal", + "8": "ctx-lh-inferiortemporal", + "9": "ctx-lh-isthmuscingulate", + "10": "ctx-lh-lateraloccipital", + "11": "ctx-lh-lateralorbitofrontal", + "12": "ctx-lh-lingual", + "13": "ctx-lh-medialorbitofrontal", + "14": "ctx-lh-middletemporal", + "15": "ctx-lh-parahippocampal", + "16": "ctx-lh-paracentral", + "17": "ctx-lh-parsopercularis", + "18": "ctx-lh-parsorbitalis", + "19": "ctx-lh-parstriangularis", + "20": "ctx-lh-pericalcarine", + "21": "ctx-lh-postcentral", + "22": "ctx-lh-posteriorcingulate", + "23": "ctx-lh-precentral", + "24": "ctx-lh-precuneus", + "25": "ctx-lh-rostralanteriorcingulate", + "26": "ctx-lh-rostralmiddlefrontal", + "27": "ctx-lh-superiorfrontal", + "28": "ctx-lh-superiorparietal", + "29": "ctx-lh-superiortemporal", + "30": "ctx-lh-supramarginal", + "31": "ctx-lh-frontalpole", + "32": "ctx-lh-temporalpole", + "33": "ctx-lh-transversetemporal", + "34": "ctx-lh-insula", + "35": "ctx-rh-bankssts", + "36": "ctx-rh-caudalanteriorcingulate", + "37": "ctx-rh-caudalmiddlefrontal", + "38": "ctx-rh-cuneus", + "39": "ctx-rh-entorhinal", + "40": "ctx-rh-fusiform", + "41": "ctx-rh-inferiorparietal", + "42": "ctx-rh-inferiortemporal", + "43": "ctx-rh-isthmuscingulate", + "44": "ctx-rh-lateraloccipital", + "45": "ctx-rh-lateralorbitofrontal", + "46": "ctx-rh-lingual", + "47": "ctx-rh-medialorbitofrontal", + "48": "ctx-rh-middletemporal", + "49": "ctx-rh-parahippocampal", + "50": "ctx-rh-paracentral", + "51": "ctx-rh-parsopercularis", + "52": "ctx-rh-parsorbitalis", + "53": "ctx-rh-parstriangularis", + "54": "ctx-rh-pericalcarine", + "55": "ctx-rh-postcentral", + "56": "ctx-rh-posteriorcingulate", + "57": "ctx-rh-precentral", + "58": "ctx-rh-precuneus", + "59": "ctx-rh-rostralanteriorcingulate", + 
"60": "ctx-rh-rostralmiddlefrontal", + "61": "ctx-rh-superiorfrontal", + "62": "ctx-rh-superiorparietal", + "63": "ctx-rh-superiortemporal", + "64": "ctx-rh-supramarginal", + "65": "ctx-rh-frontalpole", + "66": "ctx-rh-temporalpole", + "67": "ctx-rh-transversetemporal", + "68": "ctx-rh-insula", + "69": "Left-Thalamus-Proper*", + "70": "Right-Thalamus-Proper*", + "71": "Left-Caudate", + "72": "Right-Caudate", + "73": "Left-Putamen", + "74": "Right-Putamen", + "75": "Left-Pallidum", + "76": "Right-Pallidum", + "77": "Left-Hippocampus", + "78": "Right-Hippocampus", + "79": "Left-Amygdala", + "80": "Right-Amygdala", + "81": "Left-Accumbens-area", + "82": "Right-Accumbens-area", + "83": "Left-VentralDC", + "84": "Right-VentralDC", + "85": "Left-Cerebral-White-Matter", + "86": "Right-Cerebral-White-Matter", + "87": "Left-Lateral-Ventricle", + "88": "Left-Inf-Lat-Vent", + "89": "Right-Lateral-Ventricle", + "90": "Right-Inf-Lat-Vent", + "91": "3rd-Ventricle", + "92": "4th-Ventricle", + "93": "CSF", + "94": "Brain-Stem", + "95": "Left-Cerebellum-White-Matter", + "96": "Right-Cerebellum-White-Matter", + "97": "Left-Cerebellum-Cortex", + "98": "Right-Cerebellum-Cortex", + "99": "CC_Posterior", + "100": "CC_Mid_Posterior", + "101": "CC_Central", + "102": "CC_Mid_Anterior", + "103": "CC_Anterior" +} \ No newline at end of file diff --git a/public/models/model21_104class/model.json b/public/models/model21_104class/model.json new file mode 100644 index 0000000..cac7c69 --- /dev/null +++ b/public/models/model21_104class/model.json @@ -0,0 +1 @@ +{"format": "layers-model", "generatedBy": "keras v2.6.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.6.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, 
{"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": 
null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 104, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["34", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "19/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "31/bias", "shape": [21], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "33/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 104], "dtype": "float32"}, {"name": "output/bias", "shape": [104], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model21_104class/model_D95.json b/public/models/model21_104class/model_D95.json new file mode 100644 index 0000000..669f199 --- /dev/null +++ b/public/models/model21_104class/model_D95.json @@ -0,0 +1 @@ +{"format": "layers-model", "generatedBy": "keras v2.6.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.6.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 95, 256, 
256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, 
"bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 104, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["34", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "19/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": 
"19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "31/bias", "shape": [21], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "33/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 104], "dtype": "float32"}, {"name": "output/bias", "shape": [104], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model21_3class/colorLUT.json b/public/models/model21_3class/colorLUT.json new file mode 100644 index 0000000..6da374a --- /dev/null +++ b/public/models/model21_3class/colorLUT.json @@ -0,0 +1 @@ +{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model21_3class/group1-shard1of1.bin b/public/models/model21_3class/group1-shard1of1.bin new file mode 100644 index 0000000..2ebba53 Binary files /dev/null and b/public/models/model21_3class/group1-shard1of1.bin differ diff --git a/public/models/model21_3class/labels.json b/public/models/model21_3class/labels.json new file mode 100644 index 0000000..4885a94 --- /dev/null +++ b/public/models/model21_3class/labels.json @@ -0,0 +1 @@ +{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model21_3class/model.json b/public/models/model21_3class/model.json new file mode 100644 index 0000000..4f6e028 --- /dev/null +++ b/public/models/model21_3class/model.json @@ -0,0 +1 @@ +{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 64, 64, 64, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "input.1", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.1", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["input.1", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.4", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 
1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.4", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["input.4", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.8", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.8", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["input.8", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.12", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.12", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["input.12", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.16", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.16", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["input.16", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.20", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.20", 
"inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["input.20", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.24", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.24", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["input.24", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.28", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.28", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["input.28", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.32", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.32", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["input.32", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "input.1/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "input.1/bias", "shape": [21], "dtype": "float32"}, {"name": "input.12/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": 
"input.12/bias", "shape": [21], "dtype": "float32"}, {"name": "input.16/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.16/bias", "shape": [21], "dtype": "float32"}, {"name": "input.20/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.20/bias", "shape": [21], "dtype": "float32"}, {"name": "input.24/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.24/bias", "shape": [21], "dtype": "float32"}, {"name": "input.28/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.28/bias", "shape": [21], "dtype": "float32"}, {"name": "input.32/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.32/bias", "shape": [21], "dtype": "float32"}, {"name": "input.4/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.4/bias", "shape": [21], "dtype": "float32"}, {"name": "input.8/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.8/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model30chan18cls/colorLUT.json b/public/models/model30chan18cls/colorLUT.json new file mode 100644 index 0000000..27d12d1 --- /dev/null +++ b/public/models/model30chan18cls/colorLUT.json @@ -0,0 +1,21 @@ +{ + "0": "rgb(0,0,0)", + "1": "rgb(245,245,245)", + "2": "rgb(205,62,78)", + "3": "rgb(120,18,134)", + "4": "rgb(196,58,250)", + "5": "rgb(220,248,164)", + "6": "rgb(230,148,34)", + "7": "rgb(0,118,14)", + "8": "rgb(122,186,220)", + "9": "rgb(236,13,176)", + "10": "rgb(12,48,255)", + "11": "rgb(204,182,142)", + "12": "rgb(42,204,164)", + "13": "rgb(119,159,176)", + "14": "rgb(220,216,20)", + "15": "rgb(103,255,255)", + "16": "rgb(255,165,0)", + "17": "rgb(165,42,42)" +} + diff --git a/public/models/model30chan18cls/colormap.json b/public/models/model30chan18cls/colormap.json new file mode 100644 index 0000000..ee61421 --- /dev/null +++ b/public/models/model30chan18cls/colormap.json @@ -0,0 +1,6 @@ +{ + "R": [ 0, 245, 205, 120, 196, 220, 230, 0, 122, 236, 12, 204, 42, 119, 220, 103, 255, 165], + "G": [ 0, 245, 62, 18, 58, 248, 148, 118, 186, 13, 48, 182, 204, 159, 216, 255, 165, 42], + "B": [ 0, 245, 78, 134, 250, 164, 34, 14, 220, 176, 255, 142, 164, 176, 20, 255, 0, 42], + "labels": [ "Unknown", "Cerebral-White-Matter", "Cerebral-Cortex", "Lateral-Ventricle", "Inferior-Lateral-Ventricle", "Cerebellum-White-Matter", "Cerebellum-Cortex", "Thalamus", "Caudate", "Putamen", "Pallidum", "3rd-Ventricle", "4th-Ventricle", "Brain-Stem", "Hippocampus", "Amygdala", "Accumbens-area", "VentralDC"] +} \ No newline at end of file diff --git a/public/models/model30chan18cls/labels.json b/public/models/model30chan18cls/labels.json new file mode 100644 index 0000000..d022502 --- /dev/null +++ b/public/models/model30chan18cls/labels.json @@ -0,0 +1,20 @@ +{ + "0": "Unknown", + "1": "Cerebral-White-Matter", + "2": "Cerebral-Cortex", + "3": "Lateral-Ventricle", + "4": "Inferior-Lateral-Ventricle", + "5": "Cerebellum-White-Matter", + "6": "Cerebellum-Cortex", + "7": "Thalamus", + "8": "Caudate", + "9": "Putamen", + "10": "Pallidum", + "11": "3rd-Ventricle", + "12": "4th-Ventricle", + "13": "Brain-Stem", + "14": "Hippocampus", + "15": "Amygdala", + "16": "Accumbens-area", + "17": "VentralDC" +} diff --git a/public/models/model30chan18cls/model.bin b/public/models/model30chan18cls/model.bin new file mode 100644 index 
0000000..3459133 Binary files /dev/null and b/public/models/model30chan18cls/model.bin differ diff --git a/public/models/model30chan18cls/model.json b/public/models/model30chan18cls/model.json new file mode 100644 index 0000000..179715b --- /dev/null +++ b/public/models/model30chan18cls/model.json @@ -0,0 +1,808 @@ +{ + "format": "layers-model", + "generatedBy": "keras v2.7.0", + "convertedBy": "TensorFlow.js Converter v3.9.0", + "modelTopology": { + "keras_version": "2.6.0", + "backend": "tensorflow", + "model_config": { + "class_name": "Functional", + "config": { + "name": "model", + "layers": [ + { + "class_name": "InputLayer", + "config": { + "batch_input_shape": [ + null, + 256, + 256, + 256, + 1 + ], + "dtype": "float32", + "sparse": false, + "ragged": false, + "name": "input" + }, + "name": "input", + "inbound_nodes": [] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_0", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_0", + "inbound_nodes": [ + [ + [ + "input", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_1", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_1", + "inbound_nodes": [ + [ + [ + "conv3d_0", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_2", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_2", + "inbound_nodes": [ + [ + [ + "activation_1", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_3", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_3", + "inbound_nodes": [ + [ + [ + "conv3d_2", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_4", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_4", + "inbound_nodes": [ + [ + [ + "activation_3", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_5", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_5", + "inbound_nodes": [ + [ + [ + "conv3d_4", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_6", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_6", + "inbound_nodes": [ + [ + [ + "activation_5", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_7", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_7", + "inbound_nodes": [ + [ + 
[ + "conv3d_6", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_8", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 16, + 16, + 16 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_8", + "inbound_nodes": [ + [ + [ + "activation_7", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_9", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_9", + "inbound_nodes": [ + [ + [ + "conv3d_8", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_10", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_10", + "inbound_nodes": [ + [ + [ + "activation_9", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_11", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_11", + "inbound_nodes": [ + [ + [ + "conv3d_10", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_12", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_12", + "inbound_nodes": [ + [ + [ + "activation_11", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_13", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_13", + "inbound_nodes": [ + [ + [ + "conv3d_12", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_14", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_14", + "inbound_nodes": [ + [ + [ + "activation_13", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_15", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_15", + "inbound_nodes": [ + [ + [ + "conv3d_14", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_16", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_16", + "inbound_nodes": [ + [ + [ + "activation_15", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_17", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_17", + "inbound_nodes": [ + [ + [ + "conv3d_16", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": 
"Conv3D", + "config": { + "name": "output", + "trainable": false, + "filters": 18, + "kernel_size": [ + 1, + 1, + 1 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "output", + "inbound_nodes": [ + [ + [ + "activation_17", + 0, + 0, + {} + ] + ] + ] + } + ], + "input_layers": [ + [ + "input", + 0, + 0 + ] + ], + "output_layers": [ + [ + "output", + 0, + 0 + ] + ] + } + } + }, + "weightsManifest": [ + { + "paths": [ + "model.bin" + ], + "weights": [ + { + "name": "conv3d_0/kernel", + "shape": [ + 3, + 3, + 3, + 1, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_0/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "output/kernel", + "shape": [ + 1, + 1, + 1, + 30, + 18 + ], + "dtype": "float32" + }, + { + "name": "output/bias", + "shape": [ + 18 + ], + "dtype": "float32" + } + ] + } + ] +} \ No newline at end of file diff --git a/public/models/model30chan50cls/colorLUT.json b/public/models/model30chan50cls/colorLUT.json new file mode 100644 index 0000000..99c0f7e --- /dev/null +++ b/public/models/model30chan50cls/colorLUT.json @@ -0,0 +1,52 @@ +{ + "0": "rgb(0,0,0)", + "1": "rgb(245,245,245)", + "2": "rgb(196,58,250)", + "3": "rgb(220,248,164)", + "4": "rgb(230,148,34)", + "5": "rgb(0,118,14)", + "6": "rgb(122,186,220)", + "7": "rgb(236,13,176)", + "8": "rgb(12,48,255)", + "9": "rgb(119,159,176)", + "10": "rgb(220,216,20)", + "11": "rgb(103,255,255)", + "12": "rgb(60,60,60)", + "13": "rgb(255,165,0)", + "14": "rgb(165,42,42)", + "15": "rgb(0,0,208)", + "16": "rgb(25,100,40)", + "17": "rgb(125,100,160)", + "18": "rgb(100,25,0)", + "19": "rgb(220,20,100)", + "20": "rgb(220,20,10)", + "21": "rgb(180,220,140)", + "22": "rgb(220,60,220)", + "23": "rgb(180,40,120)", + "24": "rgb(140,20,140)", + "25": "rgb(20,30,140)", + "26": "rgb(35,75,50)", + "27": "rgb(225,140,140)", + "28": "rgb(200,35,75)", + "29": "rgb(160,100,50)", + "30": "rgb(20,220,60)", + "31": 
"rgb(60,220,60)", + "32": "rgb(220,180,140)", + "33": "rgb(20,100,50)", + "34": "rgb(220,60,20)", + "35": "rgb(120,100,60)", + "36": "rgb(220,20,20)", + "37": "rgb(220,180,220)", + "38": "rgb(60,20,220)", + "39": "rgb(160,140,180)", + "40": "rgb(80,20,140)", + "41": "rgb(75,50,125)", + "42": "rgb(20,220,160)", + "43": "rgb(20,180,140)", + "44": "rgb(140,220,220)", + "45": "rgb(80,160,20)", + "46": "rgb(100,0,100)", + "47": "rgb(70,70,70)", + "48": "rgb(150,150,200)", + "49": "rgb(255,192,32)" +} \ No newline at end of file diff --git a/public/models/model30chan50cls/colormap.json b/public/models/model30chan50cls/colormap.json new file mode 100644 index 0000000..9eaec04 --- /dev/null +++ b/public/models/model30chan50cls/colormap.json @@ -0,0 +1,6 @@ +{ + "R": [ 0, 245, 196, 220, 230, 0, 122, 236, 12, 119, 220, 103, 60, 255, 165, 0, 25, 125, 100, 220, 220, 180, 220, 180, 140, 20, 35, 225, 200, 160, 20, 60, 220, 20, 220, 120, 220, 220, 60, 160, 80, 75, 20, 20, 140, 80, 100, 70, 150, 255], + "G": [ 0, 245, 58, 248, 148, 118, 186, 13, 48, 159, 216, 255, 60, 165, 42, 0, 100, 100, 25, 20, 20, 220, 60, 40, 20, 30, 75, 140, 35, 100, 220, 220, 180, 100, 60, 100, 20, 180, 20, 140, 20, 50, 220, 180, 220, 160, 0, 70, 150, 192], + "B": [ 0, 245, 250, 164, 34, 14, 220, 176, 255, 176, 20, 255, 60, 0, 42, 208, 40, 160, 0, 100, 10, 140, 220, 120, 140, 140, 50, 140, 75, 50, 60, 60, 140, 50, 20, 60, 20, 220, 220, 180, 140, 125, 160, 140, 220, 20, 100, 70, 200, 32], + "labels": [ "BG", "Cerebral-White-Matter", "Ventricle", "Cerebellum-White-Matter", "Cerebellum", "Thalamus-Proper*", "Caudate", "Putamen", "Pallidum", "Brain-Stem", "Hippocampus", "Amygdala", "CSF", "Accumbens-area", "VentralDC", "Corpus callosum", "ctx-bankssts", "ctx-caudalanteriorcingulate", "ctx-caudalmiddlefrontal", "ctx-cuneus", "ctx-entorhinal", "ctx-fusiform", "ctx-inferiorparietal", "ctx-inferiortemporal", "ctx-isthmuscingulate", "ctx-lateraloccipital", "ctx-lateralorbitofrontal", "ctx-lingual", "ctx-medialorbitofrontal", "ctx-middletemporal", "ctx-parahippocampal", "ctx-paracentral", "ctx-parsopercularis", "ctx-parsorbitalis", "ctx-parstriangularis", "ctx-pericalcarine", "ctx-postcentral", "ctx-posteriorcingulate", "ctx-precentral", "ctx-precuneus", "ctx-rostralanteriorcingulate", "ctx-rostralmiddlefrontal", "ctx-superiorfrontal", "ctx-superiorparietal", "ctx-superiortemporal", "ctx-supramarginal", "ctx-frontalpole", "ctx-temporalpole", "ctx-transversetemporal", "ctx-insula"] +} \ No newline at end of file diff --git a/public/models/model30chan50cls/labels.json b/public/models/model30chan50cls/labels.json new file mode 100644 index 0000000..2d48813 --- /dev/null +++ b/public/models/model30chan50cls/labels.json @@ -0,0 +1,52 @@ +{ + "0": "BG", + "1": "Cerebral-White-Matter", + "2": "Ventricle", + "3": "Cerebellum-White-Matter", + "4": "Cerebellum", + "5": "Thalamus-Proper*", + "6": "Caudate", + "7": "Putamen", + "8": "Pallidum", + "9": "Brain-Stem", + "10": "Hippocampus", + "11": "Amygdala", + "12": "CSF", + "13": "Accumbens-area", + "14": "VentralDC", + "15": "Corpus callosum", + "16": "ctx-bankssts", + "17": "ctx-caudalanteriorcingulate", + "18": "ctx-caudalmiddlefrontal", + "19": "ctx-cuneus", + "20": "ctx-entorhinal", + "21": "ctx-fusiform", + "22": "ctx-inferiorparietal", + "23": "ctx-inferiortemporal", + "24": "ctx-isthmuscingulate", + "25": "ctx-lateraloccipital", + "26": "ctx-lateralorbitofrontal", + "27": "ctx-lingual", + "28": "ctx-medialorbitofrontal", + "29": "ctx-middletemporal", + "30": "ctx-parahippocampal", + "31": 
"ctx-paracentral", + "32": "ctx-parsopercularis", + "33": "ctx-parsorbitalis", + "34": "ctx-parstriangularis", + "35": "ctx-pericalcarine", + "36": "ctx-postcentral", + "37": "ctx-posteriorcingulate", + "38": "ctx-precentral", + "39": "ctx-precuneus", + "40": "ctx-rostralanteriorcingulate", + "41": "ctx-rostralmiddlefrontal", + "42": "ctx-superiorfrontal", + "43": "ctx-superiorparietal", + "44": "ctx-superiortemporal", + "45": "ctx-supramarginal", + "46": "ctx-frontalpole", + "47": "ctx-temporalpole", + "48": "ctx-transversetemporal", + "49": "ctx-insula" +} diff --git a/public/models/model30chan50cls/model.bin b/public/models/model30chan50cls/model.bin new file mode 100644 index 0000000..07e1df3 Binary files /dev/null and b/public/models/model30chan50cls/model.bin differ diff --git a/public/models/model30chan50cls/model.json b/public/models/model30chan50cls/model.json new file mode 100644 index 0000000..a49c024 --- /dev/null +++ b/public/models/model30chan50cls/model.json @@ -0,0 +1,811 @@ +{ + "_comment": "Normalize the data for this model with min = 5% quantile, max = 95% quantile", + "_model_location": "~/craft/meshnet/enmesh2/logs/tmp/curriculum_enmesh_30channels_50/model.last.pth", + "_wandb": "https://wandb.ai/neuroneural/curriculum_30_50cls", + "format": "layers-model", + "generatedBy": "keras v2.7.0", + "convertedBy": "TensorFlow.js Converter v3.9.0", + "modelTopology": { + "keras_version": "2.6.0", + "backend": "tensorflow", + "model_config": { + "class_name": "Functional", + "config": { + "name": "model", + "layers": [ + { + "class_name": "InputLayer", + "config": { + "batch_input_shape": [ + null, + 256, + 256, + 256, + 1 + ], + "dtype": "float32", + "sparse": false, + "ragged": false, + "name": "input" + }, + "name": "input", + "inbound_nodes": [] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_0", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_0", + "inbound_nodes": [ + [ + [ + "input", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_1", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_1", + "inbound_nodes": [ + [ + [ + "conv3d_0", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_2", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_2", + "inbound_nodes": [ + [ + [ + "activation_1", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_3", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_3", + "inbound_nodes": [ + [ + [ + "conv3d_2", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_4", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_4", + 
"inbound_nodes": [ + [ + [ + "activation_3", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_5", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_5", + "inbound_nodes": [ + [ + [ + "conv3d_4", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_6", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_6", + "inbound_nodes": [ + [ + [ + "activation_5", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_7", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_7", + "inbound_nodes": [ + [ + [ + "conv3d_6", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_8", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 16, + 16, + 16 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_8", + "inbound_nodes": [ + [ + [ + "activation_7", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_9", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_9", + "inbound_nodes": [ + [ + [ + "conv3d_8", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_10", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 8, + 8, + 8 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_10", + "inbound_nodes": [ + [ + [ + "activation_9", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_11", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_11", + "inbound_nodes": [ + [ + [ + "conv3d_10", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_12", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 4, + 4, + 4 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_12", + "inbound_nodes": [ + [ + [ + "activation_11", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_13", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_13", + "inbound_nodes": [ + [ + [ + "conv3d_12", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_14", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 2, + 2, + 2 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_14", + "inbound_nodes": [ + [ + [ + "activation_13", + 0, + 0, + {} + ] + ] + ] + }, + { + 
"class_name": "Activation", + "config": { + "name": "activation_15", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_15", + "inbound_nodes": [ + [ + [ + "conv3d_14", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "conv3d_16", + "trainable": false, + "filters": 30, + "kernel_size": [ + 3, + 3, + 3 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "conv3d_16", + "inbound_nodes": [ + [ + [ + "activation_15", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Activation", + "config": { + "name": "activation_17", + "trainable": false, + "dtype": "float32", + "activation": "elu" + }, + "name": "activation_17", + "inbound_nodes": [ + [ + [ + "conv3d_16", + 0, + 0, + {} + ] + ] + ] + }, + { + "class_name": "Conv3D", + "config": { + "name": "output", + "trainable": false, + "filters": 50, + "kernel_size": [ + 1, + 1, + 1 + ], + "strides": [ + 1, + 1, + 1 + ], + "dilation_rate": [ + 1, + 1, + 1 + ], + "padding": "same", + "data_format": "channels_last", + "activation": "linear", + "use_bias": true, + "dtype": "float32" + }, + "name": "output", + "inbound_nodes": [ + [ + [ + "activation_17", + 0, + 0, + {} + ] + ] + ] + } + ], + "input_layers": [ + [ + "input", + 0, + 0 + ] + ], + "output_layers": [ + [ + "output", + 0, + 0 + ] + ] + } + } + }, + "weightsManifest": [ + { + "paths": [ + "model.bin" + ], + "weights": [ + { + "name": "conv3d_0/kernel", + "shape": [ + 3, + 3, + 3, + 1, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_0/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_2/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_4/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_6/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_8/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_10/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_12/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_14/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/kernel", + "shape": [ + 3, + 3, + 3, + 30, + 30 + ], + "dtype": "float32" + }, + { + "name": "conv3d_16/bias", + "shape": [ + 30 + ], + "dtype": "float32" + }, + { + "name": "output/kernel", + "shape": [ + 1, + 1, + 1, + 30, + 50 + ], + "dtype": "float32" + }, + { + "name": "output/bias", + "shape": [ + 50 + ], + "dtype": "float32" + } + ] + } + ] +} diff --git a/public/models/model5_gw_ae/colorLUT.json b/public/models/model5_gw_ae/colorLUT.json new file mode 100644 index 
0000000..6da374a --- /dev/null +++ b/public/models/model5_gw_ae/colorLUT.json @@ -0,0 +1 @@ +{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model5_gw_ae/colormap.json b/public/models/model5_gw_ae/colormap.json new file mode 100644 index 0000000..9838f5d --- /dev/null +++ b/public/models/model5_gw_ae/colormap.json @@ -0,0 +1,6 @@ +{ + "R": [0, 255], + "G": [0, 0], + "B": [0, 0], + "labels": ["background", "brain"] +} \ No newline at end of file diff --git a/public/models/model5_gw_ae/colormap3.json b/public/models/model5_gw_ae/colormap3.json new file mode 100644 index 0000000..2343358 --- /dev/null +++ b/public/models/model5_gw_ae/colormap3.json @@ -0,0 +1,6 @@ +{ + "R": [ 0, 255, 205], + "G": [ 0, 255, 62], + "B": [ 0, 255, 78], + "labels": [ "background", "White Matter", "Grey Matter"] +} \ No newline at end of file diff --git a/public/models/model5_gw_ae/group1-shard1of1.bin b/public/models/model5_gw_ae/group1-shard1of1.bin new file mode 100644 index 0000000..1058e4f Binary files /dev/null and b/public/models/model5_gw_ae/group1-shard1of1.bin differ diff --git a/public/models/model5_gw_ae/labels.json b/public/models/model5_gw_ae/labels.json new file mode 100644 index 0000000..4885a94 --- /dev/null +++ b/public/models/model5_gw_ae/labels.json @@ -0,0 +1 @@ +{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model5_gw_ae/model.json b/public/models/model5_gw_ae/model.json new file mode 100644 index 0000000..9399dd6 --- /dev/null +++ b/public/models/model5_gw_ae/model.json @@ -0,0 +1 @@ +{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": 
"relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, 
"kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "35", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "35", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["35", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "37", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "37", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["37", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "21/kernel", "shape": [3, 3, 3, 1, 5], "dtype": "float32"}, {"name": "21/bias", "shape": [5], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "23/bias", "shape": [5], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "25/bias", "shape": [5], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "27/bias", "shape": [5], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "29/bias", "shape": [5], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "31/bias", "shape": [5], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "33/bias", "shape": [5], "dtype": "float32"}, {"name": "35/kernel", "shape": [3, 3, 3, 5, 
5], "dtype": "float32"}, {"name": "35/bias", "shape": [5], "dtype": "float32"}, {"name": "37/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "37/bias", "shape": [5], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 5, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/niivue.css b/public/niivue.css new file mode 100644 index 0000000..4c13be6 --- /dev/null +++ b/public/niivue.css @@ -0,0 +1,130 @@ +html { + height: auto; + min-height: 100%; + margin: 0; +} +body { + display: flex; + flex-direction: column; + margin: 0; + min-height: 100%; + width: 100%; + position: absolute; + font-family: system-ui, Arial, Helvetica, sans-serif; + user-select: none; /* Standard syntax */ + color: white; + background: #303030; +} +header { + margin: 10px; +} +main { + flex: 1; + background: #000000; + position: relative; +} +footer { + margin: 10px; +} +canvas { + position: absolute; + cursor: crosshair; +} +canvas:focus { + outline: 0px; +} +div { + display: table-row; +} +.dropdown { + float: left; + overflow: hidden; +} +.dropdown .dropbtn { + font-size: 16px; + border: none; + outline: none; + color: white; + padding: 12px 12px; + background-color: #303030; + font-family: inherit; + margin: 0; +} +.dropdown:hover .dropbtn { + background-color: #9a9; +} +.dropdown-content { + display: none; + position: absolute; + background-color: #303030; + min-width: 160px; + border-radius: 5px; + box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2); + z-index: 1; +} +.dropdown-content a { + float: none; + color: white; + padding: 12px 16px; + text-decoration: none; + display: block; + text-align: left; + line-height: 6px; +} +.dropdown-content a:hover { + background-color: #aba; +} +.dropdown:hover .dropdown-content { + display: block; +} +.dropdown-item-checked::before { + position: absolute; + left: 0.2rem; + content: "\2022"; /* or '✓' */ + font-weight: 600; +} +.divider { + border-top: 1px solid grey; +} +.vertical-divider { + border-left: 1px solid grey; + height: 40px; +} +.help-text { + margin: auto; + max-width: 150px; + padding: 0 10px; +} +.slidecontainer { + padding: 10px 10px; + white-space: normal; + word-break: break-word; + display: flex; + align-items: center; + flex: 0 0 auto; +} + +div.footer { width: 100%; display: block; background: #303030;} +table.footer { width: 100%;height: 100%; table-layout: fixed;} + +.loading-circle { + display: inline-block; + width: 20px; + height: 20px; + border: 3px solid rgba(255, 255, 255, 0.3); + border-radius: 50%; + border-top-color: #fff; + animation: spin 1s ease-in-out infinite; + margin-left: 10px; + vertical-align: middle; +} + +@keyframes spin { + to { + transform: rotate(360deg); + } +} + +.hidden { + display: none; +} \ No newline at end of file diff --git a/public/t1_crop.nii.gz b/public/t1_crop.nii.gz new file mode 100644 index 0000000..d624caa Binary files /dev/null and b/public/t1_crop.nii.gz differ diff --git a/tensor-utils.js b/tensor-utils.js new file mode 100644 index 0000000..4286c93 --- /dev/null +++ b/tensor-utils.js @@ -0,0 +1,613 @@ +import * as tf from '@tensorflow/tfjs' +import { BWLabeler } from './bwlabels.js' + +export async function addZeroPaddingTo3dTensor(tensor3d, rowPadArr = [1, 1], colPadArr = [1, 1], depthPadArr = [1, 1]) { + if (tensor3d.rank !== 3) { + throw new Error('Tensor must be 3D') + } + return tensor3d.pad([rowPadArr, colPadArr, depthPadArr]) +} + +export async function applyMriThreshold(tensor, percentage) { + 
// Perform asynchronous operations outside of tf.tidy + const maxTensor = tensor.max() + const thresholdTensor = maxTensor.mul(percentage) + const threshold = await thresholdTensor.data() // Extracts the threshold value + + // Dispose tensors not needed anymore + maxTensor.dispose() + thresholdTensor.dispose() + + // Use tf.tidy for synchronous operations + return tf.tidy(() => { + const dataForProcessing = tensor.clone() + + // Thresholding (assuming background has very low values compared to the head) + const mask = dataForProcessing.greater(threshold[0]) + // -- const denoisedMriData = dataForProcessing.mul(mask) + + // No need to manually dispose dataForProcessing and mask, as tf.tidy() will dispose them auto. + return mask + }) + + // -- return denoisedMriData +} + +export async function binarizeVolumeDataTensor(volumeDataTensor) { + const alpha = 0 + // element-wise: (x > 0 ? 1 : alpha * x ); e.g. Tenosr [0, 0.9, 0.8, -3] => Tensor [0, 1, 1, 0] + return volumeDataTensor.step(alpha) +} + +async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) { + // Flatten the tensor + const flatTensor = tensor.flatten() + + // Convert the flattened tensor to an array to sort it + const flatArray = await flatTensor.array() + flatArray.sort((a, b) => a - b) // Sort the array in ascending order + + // Convert the sorted array back to a tensor + const sortedTensor = tf.tensor1d(flatArray) + + // Calculate the indices for the quantiles + const numElements = sortedTensor.shape[0] + const lowIndex = Math.floor(numElements * lowerQuantile) + const highIndex = Math.ceil(numElements * upperQuantile) - 1 // Subtract 1 because indices are 0-based + + // Slice the sorted tensor to get qmin and qmax + const qmin = sortedTensor.slice(lowIndex, 1) // Get the value at the low index + const qmax = sortedTensor.slice(highIndex, 1) // Get the value at the high index + + // Get the actual values from the tensors + const qminValue = (await qmin.array())[0] + const qmaxValue = (await qmax.array())[0] + + // Clean up tensors to free memory + flatTensor.dispose() + sortedTensor.dispose() + qmin.dispose() + qmax.dispose() + + return { qmin: qminValue, qmax: qmaxValue } +} + +export async function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) { + const inChannels = input.shape[4] + const outChannels = filter.shape[4] + + // Create an empty array to hold the output channels + let outputChannels = null + + // Slice the input tensor and process one output channel at a time + for (let channel = 0; channel < outChannels; channel++) { + const numSlices = Math.ceil(inChannels / sliceSize) + const biasesSlice = biases.slice([channel], [1]) + let outputChannel = null + + for (let i = 0; i < numSlices; i++) { + const startChannel = i * sliceSize + const endChannel = Math.min((i + 1) * sliceSize, inChannels) + + // Only proceed if there are channels to process + if (startChannel < inChannels) { + const resultSlice = tf.tidy(() => { + const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel]) + const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1]) + // Perform the convolution for the current slice and output channel + return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate) + }) + + if (outputChannel === null) { + outputChannel = resultSlice + } else { + const updatedOutputChannel = outputChannel.add(resultSlice) + 
outputChannel.dispose() + resultSlice.dispose() + outputChannel = updatedOutputChannel + } + } + } + + // Add the biases to the accumulated convolutions for this channel + const biasedOutputChannel = outputChannel.add(biasesSlice) + outputChannel.dispose() + biasesSlice.dispose() + + // Accumulate the channel to the output array + if (outputChannels == null) { + outputChannels = biasedOutputChannel + } else { + const updatedOutputChannels = await tf.concat([outputChannels, biasedOutputChannel], 4) + biasedOutputChannel.dispose() + outputChannels.dispose() + outputChannels = updatedOutputChannels + } + } + + return outputChannels +} + +export async function draw3dObjBoundingVolume(unstackOutVolumeTensor, opts, modelEntry, callbackImg) { + const allOutputSlices3DCC = [] + + // dataSync() using to flatten array. Takes around 1.5 s + for (let sliceTensorIdx = 0; sliceTensorIdx < unstackOutVolumeTensor.length; sliceTensorIdx++) { + allOutputSlices3DCC[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync()) + } + + // Use this conversion to download output slices as nii file. Takes around 30 ms + // does not use `push` to avoid stack overflows. In future: consider .set() with typed arrays + const allOutputSlices3DCC1DimArray = new Array(allOutputSlices3DCC[0].length * allOutputSlices3DCC.length) + let index = 0 + for (let sliceIdx = 0; sliceIdx < allOutputSlices3DCC.length; sliceIdx++) { + for (let i = 0; i < allOutputSlices3DCC[sliceIdx].length; i++) { + allOutputSlices3DCC1DimArray[index++] = allOutputSlices3DCC[sliceIdx][i] + } + } + console.log('Done with allOutputSlices3DCC1DimArray ') + const brainMaskTensor1d = await binarizeVolumeDataTensor(tf.tensor1d(allOutputSlices3DCC1DimArray)) + const brainOut = Array.from(brainMaskTensor1d.dataSync()) + callbackImg(brainOut, opts, modelEntry) +} +// return first and last non-zero voxel in row (dim = 0), column (1) or slice (2) dimension +async function firstLastNonZero(tensor3D, dim = 0) { + let mxs = [] + if (dim === 0) { + mxs = await tensor3D.max(2).max(1).arraySync() + } else if (dim === 1) { + mxs = await tensor3D.max(2).max(0).arraySync() + } else { + mxs = await tensor3D.max(1).max(0).arraySync() + } + let mn = mxs.length + let mx = 0 + for (let i = 0; i < mxs.length; i++) { + if (mxs[i] > 0) { + mn = i + break + } + } + for (let i = mxs.length - 1; i >= 0; i--) { + if (mxs[i] > 0) { + mx = i + break + } + } + return [mn, mx] +} + +export async function firstLastNonZero3D(tensor3D) { + const [row_min, row_max] = await firstLastNonZero(tensor3D, 0) + const [col_min, col_max] = await firstLastNonZero(tensor3D, 1) + const [depth_min, depth_max] = await firstLastNonZero(tensor3D, 2) + console.log('row min and max :', row_min, row_max) + console.log('col min and max :', col_min, col_max) + console.log('depth min and max :', depth_min, depth_max) + return [row_min, row_max, col_min, col_max, depth_min, depth_max] +} + +/* +//simpler function, but x4 slower +export async function firstLastNonZero3D(tensor3D) { + const coords = await tf.whereAsync(tensor3D) + const row_min = coords.min(0).arraySync()[0] + const row_max = coords.max(0).arraySync()[0] + const col_min = coords.min(0).arraySync()[1] + const col_max = coords.max(0).arraySync()[1] + const depth_min = coords.min(0).arraySync()[2] + const depth_max = coords.max(0).arraySync()[2] + coords.dispose() + return [row_min, row_max, col_min, col_max, depth_min, depth_max] +} +*/ + +export async function generateBrainMask( + unstackOutVolumeTensor, + num_of_slices, + 
slice_height, + slice_width, + modelEntry, + opts, + callbackUI, + callbackImg, + isFinalImage = true +) { + if (unstackOutVolumeTensor[0].dtype !== 'int32') { + callbackUI('', -1, 'generateBrainMask assumes int32') + } + if (modelEntry.preModelPostProcess) { + callbackUI('', -1, 'generateBrainMask assumes BWLabeler instead of preModelPostProcess') + } + const numSlices = unstackOutVolumeTensor.length + const numPixels2D = unstackOutVolumeTensor[0].size + const numVox3D = numSlices * numPixels2D + // preallocate to reduce heap usage + const brainOut = new Int32Array(numVox3D) + let offset = 0 + for (let i = 0; i < numSlices; i++) { + brainOut.set(unstackOutVolumeTensor[i].dataSync(), offset) + offset += numPixels2D + } + for (let i = 0; i < numVox3D; i++) { + brainOut[i] = brainOut[i] !== 0 ? 1 : 0 + } + if (isFinalImage || opts.showPhase1Output) { + // all done + callbackImg(brainOut, opts, modelEntry) + callbackUI('Segmentation finished', 0) + } + return tf.tensor(brainOut, [num_of_slices, slice_height, slice_width]) +} + +export async function generateOutputSlicesV2( + img, + OutVolumeTensorShape, + OutVolumeTensorType, + num_of_slices, + numSegClasses, + slice_height, + slice_width, + modelEntry, + opts, + niftiImage +) { + // Convert all slices into 1 Dim array + if (opts.isPostProcessEnable) { + const BWInstance = new BWLabeler() + const dim = new Uint32Array(OutVolumeTensorShape) + const conn = 26 // Example connectivity + const binarize = true + const onlyLargestClusterPerClass = true + const [_labelCount, labeledImage] = BWInstance.bwlabel(img, dim, conn, binarize, onlyLargestClusterPerClass) + for (let i = 0; i < img.length; i++) { + img[i] *= labeledImage[i] + } + } // if isPostProcessEnable + const typedArrayConstructor = { + float32: Float32Array, + int32: Int32Array + // Add other cases as needed for different dtypes + }[OutVolumeTensorType] + // Create a new TypedArray from img with the same type as outLabelVolume + const allOutputSlices3DCC1DimArray = new Uint8Array(img) + switch (modelEntry.type) { + case 'Brain_Masking': { + const brainMask = new Uint8Array(allOutputSlices3DCC1DimArray.length) + for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) { + brainMask[i] = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0 + } + return brainMask + } + case 'Brain_Extraction': { + const maskedData = new Uint8Array(allOutputSlices3DCC1DimArray.length) + for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) { + // Create the mask - 1 where the value is non-zero, 0 where it is zero. + const maskValue = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0 + // Apply the mask to the data - multiply by the mask value. 
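+            // Note: maskedData is a Uint8Array, so masked intensities outside 0-255 wrap modulo 256.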
+ maskedData[i] = niftiImage[i] * maskValue + } + return maskedData + } + } + return img +} + +export async function getAllSlicesDataAsTF3D(num_of_slices, niftiHeader, niftiImage) { + // Get nifti dimensions + const cols = niftiHeader.dims[1] // Slice width + const rows = niftiHeader.dims[2] // Slice height + let typedData + if (niftiHeader.datatypeCode === 2) { + // enum from nvimage/utils DT_UINT8 = 2 + typedData = new Uint8Array(niftiImage) + } else if (niftiHeader.datatypeCode === 4) { + // DT_INT16 = 4 + typedData = new Int16Array(niftiImage) + } else if (niftiHeader.datatypeCode === 8) { + // DT_INT32 = 8 + typedData = new Int32Array(niftiImage) + } else if (niftiHeader.datatypeCode === 16) { + // DT_FLOAT32 = 16 + typedData = new Float32Array(niftiImage) + } else if (niftiHeader.datatypeCode === 64) { + // DT_FLOAT64 = 64 + typedData = new Float64Array(niftiImage) + } else if (niftiHeader.datatypeCode === 256) { + // DT_INT8 = 256 + typedData = new Int8Array(niftiImage) + } else if (niftiHeader.datatypeCode === 512) { + // DT_UINT16 = 512 + typedData = new Uint16Array(niftiImage) + } else if (niftiHeader.datatypeCode === 768) { + // DT_UINT32 = 768 + typedData = new Uint32Array(niftiImage) + } else { + return + } + const allSlices_2D = [] + let offset3D = 0 + // Draw pixels + for (let slice = 0; slice < num_of_slices; slice++) { + const slice = new Array(rows * cols) + let offset2D = 0 + for (let row = 0; row < rows; row++) { + for (let col = 0; col < cols; col++) { + const value = typedData[offset3D++] + // Create 1Dim Array of pixel value, this 1 dim represents one channel + slice[offset2D++] = value & 0xff + } + } + allSlices_2D.push(tf.tensor(slice, [rows, cols])) // slice_height, slice_width + } + const allSlices_3D = tf.stack(allSlices_2D) + tf.dispose(allSlices_2D) + return allSlices_3D +} + +export async function getModelNumLayers(modelObj) { + return modelObj.layers.length +} + +export async function getModelNumParameters(modelObj) { + let numParameters = 0 + for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) { + numParameters += modelObj.layers[layerIdx].countParams() + } + return numParameters +} + +export async function isModelChnlLast(modelObj) { + for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) { + if (modelObj.layersByDepth[layerIdx][0].dataFormat) { + return modelObj.layersByDepth[layerIdx][0].dataFormat === 'channelsLast' + } + } +} + +export async function load_model(modelUrl) { + return await tf.loadLayersModel(modelUrl) +} + +export async function minMaxNormalizeVolumeData(volumeData) { + // Normalize the data to the range 0 - 1 using min-max scaling + const volumeData_Max = volumeData.max() + const volumeData_Min = volumeData.min() + const normalizedSlices_3d = await volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min)) + return normalizedSlices_3d +} + +function processTensorInChunks(inputTensor, filterWeights, chunkSize) { + // Assuming inputTensor's shape: [batch, depth, height, width, inChannels] + // and filterWeights's shape: [filterDepth, filterHeight, filterWidth, inChannels, outChannels] + const stride = 1 + const pad = 0 + const dilationRate = 1 + const inChannels = inputTensor.shape[4] + const numSlices = Math.ceil(inChannels / chunkSize) + + let accumulatedResult = null + for (let i = 0; i < numSlices; i++) { + const startChannel = i * chunkSize + const endChannel = Math.min((i + 1) * chunkSize, inChannels) + const channels = endChannel - startChannel + + const inputSlice = tf.tidy(() => { + 
// Slice the input tensor to get the current chunk + return inputTensor.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, channels]) + }) + const filterSlice = tf.tidy(() => { + // Slice the filter weights to match the input tensor's current chunk + return filterWeights.slice([0, 0, 0, startChannel, 0], [-1, -1, -1, channels, -1]) + }) + + const resultSlice = tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate) + // Clean up the slices to free memory + inputSlice.dispose() + filterSlice.dispose() + + // Squeeze the result slice to remove dimensions of size 1 + const squeezedResultSlice = tf.squeeze(resultSlice) + resultSlice.dispose() // Dispose of the original resultSlice after squeezing + + if (accumulatedResult === null) { + accumulatedResult = squeezedResultSlice + } else { + // Accumulate the result by adding the new result slice to it + const newAccumulatedResult = accumulatedResult.add(squeezedResultSlice) + + // Dispose of the previous accumulatedResult and squeezedResultSlice + accumulatedResult.dispose() + // Dispose of squeezedResultSlice only if it wasn't assigned to accumulatedResult + if (accumulatedResult !== squeezedResultSlice) { + squeezedResultSlice.dispose() + } + // Update accumulatedResult with the new result + accumulatedResult = newAccumulatedResult + } + + tf.tidy(() => { + tf.matMul(tf.zeros([1, 1]), tf.zeros([1, 1])) + }) + } + + return accumulatedResult +} + +export async function quantileNormalizeVolumeData(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) { + // Call calculateQuantiles and wait for the result + const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile) + + // Convert qmin and qmax back to scalars + const qminScalar = tf.scalar(qmin) + const qmaxScalar = tf.scalar(qmax) + + // Perform the operation: (tensor - qmin) / (qmax - qmin) + const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar)) + + // Dispose of the created scalars to free memory + qminScalar.dispose() + qmaxScalar.dispose() + + // Return the resulting tensor + return resultTensor +} + +export async function removeZeroPaddingFrom3dTensor(tensor3d, rowPad = 1, colPad = 1, depthPad = 1) { + if (tensor3d.rank !== 3) { + throw new Error('Tensor must be 3D') + } + const [h, w, d] = tensor3d.shape + return tensor3d.slice([rowPad, colPad, depthPad], [h - 2 * rowPad, w - 2 * colPad, d - 2 * depthPad]) +} + +export async function resizeWithZeroPadding(croppedTensor3d, newDepth, newHeight, newWidth, refVoxel, boundVolSizeArr) { + const row_pad_befor = refVoxel[0] + const col_pad_befor = refVoxel[1] + const depth_pad_befor = refVoxel[2] + // last and lower volume voxel + const row_max = row_pad_befor + boundVolSizeArr[0] - 1 // size [2, 2, 2] means 2 voxels total in each dim + const col_max = col_pad_befor + boundVolSizeArr[1] - 1 + const depth_max = depth_pad_befor + boundVolSizeArr[2] - 1 + + const row_pad_after = newHeight - row_max - 1 > 0 ? newHeight - row_max - 1 : 0 + const col_pad_after = newWidth - col_max - 1 > 0 ? newWidth - col_max - 1 : 0 + const depth_pad_after = newDepth - depth_max - 1 > 0 ? 
newDepth - depth_max - 1 : 0 + + return croppedTensor3d.pad([ + [row_pad_befor, row_pad_after], + [col_pad_befor, col_pad_after], + [depth_pad_befor, depth_pad_after] + ]) +} + +export class SequentialConvLayer { + constructor(model, chunkSize, isChannelLast, callbackUI, isWebWorker = true) { + this.model = model + this.outChannels = model.outputLayers[0].kernel.shape[4] + this.chunkSize = chunkSize + this.isChannelLast = isChannelLast + this.callbackUI = callbackUI + this.isWebWorker = isWebWorker + } + + /** + * Apply sequential convolution layer + * @since 3.0.0 + * @member SequentialConvLayer + * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ] + * @return {outC} + */ + + async apply(inputTensor) { + const oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD') + tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0) + + // eslint-disable-next-line @typescript-eslint/no-this-alias + const self = this + // Important to avoid "undefined" class var members inside the timer. + // "this" has another meaning inside the timer. + + // document.getElementById("progressBarChild").parentElement.style.visibility = "visible" + const startTime = performance.now() + + const convLayer = self.model.layers[self.model.layers.length - 1] + const weights = convLayer.getWeights()[0] // + const biases = convLayer.getWeights()[1] + const outputShape = self.isChannelLast ? inputTensor.shape.slice(1, -1) : inputTensor.shape.slice(2) + // -- e.g. outputShape : [256,256,256] or cropped Dim + // -- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W] + // -- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W] + + let outB = tf.mul(tf.ones(outputShape), -10000) + // -- e.g. outB.shape [256,256,256] + let outC = tf.zeros(outputShape) + // -- e.g. outC.shape [256,256,256] + let chIdx = 0 + + // console.log("---------------------------------------------------------") + console.log(' channel loop') + + while (true) { + tf.engine().startScope() // Start TensorFlow.js scope + /* console.log('=======================') + const memoryInfo0 = await tf.memory() + console.log(`| Number of Tensors: ${memoryInfo0.numTensors}`) + console.log(`| Number of Data Buffers: ${memoryInfo0.numDataBuffers}`) */ + + const result = await tf.tidy(() => { + const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1]) + // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ] + const filterBiases = biases.slice([chIdx], [1]) + // -- e.g. 
filterBiases.shape [1] -> Tensor [-0.7850812] + const outA = processTensorInChunks(inputTensor, filterWeights, Math.min(self.chunkSize, self.outChannels)).add( + filterBiases + ) + const greater = tf.greater(outA, outB) + const newoutB = tf.where(greater, outA, outB) + const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC) + // Dispose the old tensors before reassigning + tf.dispose([outB, outC, filterWeights, filterBiases, outA, greater]) + // Dummy operation to trigger cleanup + tf.tidy(() => tf.matMul(tf.ones([1, 1]), tf.ones([1, 1]))) + return [newoutC, newoutB] + }) + console.log('=======================') + self.callbackUI(`Iteration ${chIdx}`, chIdx / self.outChannels) + if (!self.isWebWorker) { + // allow user interface to refresh + await new Promise((resolve) => setTimeout(resolve, 17)) + } + const memoryInfo = await tf.memory() + console.log(`Number of Tensors: ${memoryInfo.numTensors}`) + console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`) + console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`) + if (memoryInfo.unreliable) { + console.log(`Unreliable: ${memoryInfo.unreliable}`) + } + // Dispose of previous values before assigning new tensors to outC and outB + if (typeof outC !== 'undefined') { + outC.dispose() + } + if (typeof outB !== 'undefined') { + outB.dispose() + } + // Assign the new values to outC and outB + outC = tf.keep(result[0]) + outB = tf.keep(result[1]) + // // Assign the new values to outC and outB + // outC = result[0] + // outB = result[1] + tf.engine().endScope() + + if (chIdx === self.outChannels - 1) { + // document.getElementById("progressBarChild").style.width = 0 + "%" + tf.dispose(outB) + const endTime = performance.now() + const executionTime = endTime - startTime + console.log(`Execution time for output layer: ${executionTime} milliseconds`) + tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', oldDeleteTextureThreshold) + return outC + } else { + chIdx++ + + // the seemingly strange sequence of operations + // below prevents tfjs from uncontrolably + // grabbing buffers, even when all tensors have + // already been disposed + + const outCShape = outC.shape + const outCdata = outC.dataSync() + const outBShape = outC.shape + const outBdata = outB.dataSync() + outC.dispose() + outB.dispose() + // tf.disposeVariables() + outC = tf.tensor(outCdata, outCShape) + outB = tf.tensor(outBdata, outBShape) + + // document.getElementById("progressBarChild").style.width = (chIdx + 1) * 100 / self.outChannels + "%" + } + } + } +} // <<<< End of class diff --git a/vite.config.js b/vite.config.js new file mode 100644 index 0000000..25e5b81 --- /dev/null +++ b/vite.config.js @@ -0,0 +1,16 @@ +import { defineConfig } from 'vite' + +export default defineConfig({ + // root: '.', + base: './', + server: { + open: 'index.html', + }, + worker: { + format: 'esm' + }, + // exclude @niivue/niimath from optimization + optimizeDeps: { + exclude: ['@niivue/niimath'] + } +}) \ No newline at end of file
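
A minimal sketch (not part of the diff itself) of how the normalization and masking helpers exported from `tensor-utils.js` above might be exercised on their own. The volume shape, the 0.1 threshold, and the `demo` wrapper are illustrative assumptions, not values the application itself uses; only the 256x256x256 input size of the bundled model is taken from `model.json`.

```javascript
import * as tf from '@tensorflow/tfjs'
import { quantileNormalizeVolumeData, applyMriThreshold, firstLastNonZero3D } from './tensor-utils.js'

async function demo() {
  // Stand-in volume; a conformed T1 scan in the app would be 256x256x256.
  const volume = tf.randomUniform([64, 64, 64], 0, 1000)

  // Rescale intensities to roughly [0, 1] using the 5th/95th percentiles.
  const normalized = await quantileNormalizeVolumeData(volume, 0.05, 0.95)

  // Rough head mask: voxels brighter than 10% of the maximum intensity.
  // Cast to int32 so the reductions inside firstLastNonZero3D run on a numeric tensor.
  const mask = (await applyMriThreshold(normalized, 0.1)).cast('int32')

  // Bounding box of the non-zero voxels, the kind of crop applied before inference.
  const [rMin, rMax, cMin, cMax, dMin, dMax] = await firstLastNonZero3D(mask)
  console.log('bounding box:', rMin, rMax, cMin, cMax, dMin, dMax)

  tf.dispose([volume, normalized, mask])
}

demo()
```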