System properties for debugging #91

Merged: 2 commits, Sep 18, 2024

93 changes: 74 additions & 19 deletions src/main/java/qupath/ext/instanseg/core/InstanSeg.java
@@ -5,10 +5,14 @@
import ai.djl.ndarray.BaseNDManager;
import ai.djl.repository.zoo.Criteria;
import ai.djl.training.util.ProgressBar;
import org.bytedeco.opencv.global.opencv_core;
import org.bytedeco.opencv.opencv_core.Mat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import qupath.lib.experimental.pixels.OpenCVProcessor;
import qupath.lib.experimental.pixels.OutputHandler;
import qupath.lib.experimental.pixels.Parameters;
import qupath.lib.experimental.pixels.Processor;
import qupath.lib.images.ImageData;
import qupath.lib.images.servers.ColorTransforms;
import qupath.lib.objects.PathAnnotationObject;
@@ -226,33 +230,34 @@ private InstanSegResults runInstanSeg(ImageData<BufferedImage> imageData, Collec
printResourceCount("Resource count after creating predictors",
(BaseNDManager)baseManager.getParentManager());

int sizeWithoutPadding = (int) Math.ceil(downsample * (tileDims - (double) padding*2));
var predictionProcessor = new TilePredictionProcessor(predictors, inputChannels, tileDims, tileDims, padToInputSize);
var tiler = createTiler(downsample, tileDims, padding);
var predictionProcessor = createProcessor(predictors, inputChannels, tileDims, padToInputSize);
var outputHandler = createOutputHandler(preferredOutputClass, randomColors, boundaryThreshold);
var postProcessor = createPostProcessor();

var processor = OpenCVProcessor.builder(predictionProcessor)
.imageSupplier((parameters) -> ImageOps.buildImageDataOp(inputChannels)
.apply(parameters.getImageData(), parameters.getRegionRequest()))
.tiler(Tiler.builder(sizeWithoutPadding)
.alignCenter()
.cropTiles(false)
.build()
)
.outputHandler(
new PruneObjectOutputHandler<>(
new InstanSegOutputToObjectConverter(preferredOutputClass, randomColors), boundaryThreshold))
.tiler(tiler)
.outputHandler(outputHandler)
.padding(padding)
.postProcess(createPostProcessor())
.postProcess(postProcessor)
.downsample(downsample)
.build();
processor.processObjects(taskRunner, imageData, pathObjects);
int nObjects = pathObjects.stream().mapToInt(PathObject::nChildObjects).sum();
return new InstanSegResults(
predictionProcessor.getPixelsProcessedCount(),
predictionProcessor.getTilesProcessedCount(),
predictionProcessor.getTilesFailedCount(),
nObjects,
System.currentTimeMillis() - startTime,
predictionProcessor.wasInterrupted()
);
if (predictionProcessor instanceof TilePredictionProcessor tileProcessor) {
return new InstanSegResults(
tileProcessor.getPixelsProcessedCount(),
tileProcessor.getTilesProcessedCount(),
tileProcessor.getTilesFailedCount(),
nObjects,
System.currentTimeMillis() - startTime,
tileProcessor.wasInterrupted()
);
} else {
return InstanSegResults.emptyInstance();
}
} finally {
for (var predictor: predictors) {
predictor.close();
@@ -266,6 +271,54 @@ private InstanSegResults runInstanSeg(ImageData<BufferedImage> imageData, Collec
}
}

/**
* Check if we are requesting tiles for debugging purposes.
* When this is true, we should create objects that represent the tiles - not the objects to be detected.
* @return true if the "instanseg.debug.tiles" system property is set to "true" (case-insensitive), false otherwise
*/
private static boolean debugTiles() {
return System.getProperty("instanseg.debug.tiles", "false").strip().equalsIgnoreCase("true");
}
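// Illustrative usage (not part of this PR): since this is a plain system property it can be
// enabled without code changes - either at launch with the standard JVM flag
//   -Dinstanseg.debug.tiles=true
// or at runtime, e.g. from a script, before running InstanSeg:
//   System.setProperty("instanseg.debug.tiles", "true");
// With the property set, the pipeline below produces annotation objects representing the tiles
// rather than detected objects, which is useful for checking tile size, padding and alignment.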

private static Processor<Mat, Mat, Mat> createProcessor(BlockingQueue<Predictor<Mat, Mat>> predictors,
Collection<? extends ColorTransforms.ColorTransform> inputChannels,
int tileDims, boolean padToInputSize) {
if (debugTiles())
return InstanSeg::createOnes;
return new TilePredictionProcessor(predictors, inputChannels, tileDims, tileDims, padToInputSize);
}

private static Mat createOnes(Parameters<Mat, Mat> parameters) {
var tileRequest = parameters.getTileRequest();
int width, height;
if (tileRequest == null) {
var region = parameters.getRegionRequest();
width = (int)Math.round(region.getWidth() / region.getDownsample());
height = (int)Math.round(region.getHeight() / region.getDownsample());
} else {
width = tileRequest.getTileWidth();
height = tileRequest.getTileHeight();
}
return Mat.ones(height, width, opencv_core.CV_8UC1).asMat();
}

private static OutputHandler<Mat, Mat, Mat> createOutputHandler(Class<? extends PathObject> preferredOutputClass, boolean randomColors,
int boundaryThreshold) {
if (debugTiles())
return OutputHandler.createUnmaskedObjectOutputHandler(OpenCVProcessor.createAnnotationConverter());
return new PruneObjectOutputHandler<>(
new InstanSegOutputToObjectConverter(preferredOutputClass, randomColors), boundaryThreshold);
}

private static Tiler createTiler(double downsample, int tileDims, int padding) {
int sizeWithoutPadding = (int) Math.ceil(downsample * (tileDims - (double) padding*2));
return Tiler.builder(sizeWithoutPadding)
.alignCenter()
.cropTiles(false)
.build();
}
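// Worked example (illustrative numbers): with tileDims = 512, padding = 32 and downsample = 2.0,
// sizeWithoutPadding = ceil(2.0 * (512 - 2 * 32)) = 896 full-resolution pixels per tile,
// i.e. 448 pixels at the model's working resolution - the tile dimension minus the padding on both sides.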


/**
* Get the input channels to use; if we don't have any specified, use all of them
* @param imageData
@@ -284,6 +337,8 @@ private List<ColorTransforms.ColorTransform> getInputChannels(ImageData<Buffered
}

private static ObjectProcessor createPostProcessor() {
if (debugTiles())
return null;
var merger = ObjectMerger.createIoMinMerger(0.5);
var fixer = OverlapFixer.builder()
.clipOverlaps()
@@ -143,7 +143,7 @@ public Mat process(Parameters<Mat, Mat> params) throws IOException {
var matOutput = predictor.predict(mat);

// These are useful for spotting issues with the model
if (System.getProperty("instanseg.showTiles", "false").equalsIgnoreCase("true")) {
if (System.getProperty("instanseg.debug.predictions", "false").equalsIgnoreCase("true")) {
OpenCVTools.matToImagePlus("Input " + params.getRegionRequest(), mat).show();
OpenCVTools.matToImagePlus("Output " + params.getRegionRequest(), matOutput).show();
}
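Both debug switches introduced in this PR (instanseg.debug.tiles above, and instanseg.debug.predictions in the prediction code) are read via System.getProperty, so they can be toggled without rebuilding the extension. A minimal sketch of flipping them from a QuPath script - the property names come from this diff, everything else is illustrative, and Groovy accepts these Java-style statements as-is:

// Show the input and output Mat for each tile in ImageJ while the model runs
System.setProperty("instanseg.debug.predictions", "true");

// Or create objects for the tiles themselves instead of running the model
System.setProperty("instanseg.debug.tiles", "true");

// Reset both once finished debugging
System.setProperty("instanseg.debug.predictions", "false");
System.setProperty("instanseg.debug.tiles", "false");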
@@ -794,14 +794,16 @@ private void runInstanSeg(InstanSegModel model) {
CompletableFuture.supplyAsync(this::ensurePyTorchAvailable, ForkJoinPool.commonPool())
.thenAccept((Boolean success) -> {
if (success) {
pendingTask.set(task);
// Reset the pending task when it completes (either successfully or not)
task.stateProperty().addListener((observable, oldValue, newValue) -> {
if (Set.of(Worker.State.CANCELLED, Worker.State.SUCCEEDED, Worker.State.FAILED).contains(newValue)) {
if (pendingTask.get() == task)
pendingTask.set(null);
}
});
// Setting the pending task prompts it to be run (so we need to do it after attaching
// the listener, in case it ends really quickly)
pendingTask.set(task);
}
});
}
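The reordering in this last hunk avoids a race with tasks that finish very quickly: setting pendingTask is what triggers the task to run, so if it were set before the state listener is attached, a task that reaches a terminal state immediately could be missed and pendingTask would never be reset. A minimal sketch of the same ordering principle in plain Java - the TinyTask class and field names are illustrative, not the extension's actual API:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

public class ListenerOrderDemo {

    /** Illustrative stand-in for a task whose completion can be observed. */
    static class TinyTask {
        private Consumer<TinyTask> onFinished = t -> {};

        void setOnFinished(Consumer<TinyTask> listener) {
            this.onFinished = listener;
        }

        /** Completes immediately, like a task that fails or finishes very early. */
        void run() {
            onFinished.accept(this);
        }
    }

    public static void main(String[] args) {
        AtomicReference<TinyTask> pending = new AtomicReference<>();
        TinyTask task = new TinyTask();

        // Attach the completion listener first, so even an instantly-finishing
        // task clears the pending reference...
        task.setOnFinished(t -> pending.compareAndSet(t, null));

        // ...and only then mark the task as pending and start it,
        // mirroring the order used in the diff above.
        pending.set(task);
        task.run();

        System.out.println("Pending after completion: " + pending.get()); // prints null
    }
}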