Environment: tiatoolbox 1.5.1 (Python 3.10.12) on Linux-5.15.0-118-generic-x86_64-with-glibc2.35.
I am trying to use the pretrained nucleus detection model trained on the CoNIC dataset, and here is my code.
It is essentially the same code as on the website, but when I run it on my machine it gives me the error below.
I have NDPI images of colon biopsy slides and want to detect different kinds of cells, but I am struggling to figure out where I am going wrong.
import logging
import warnings
if logging.getLogger().hasHandlers():
    logging.getLogger().handlers.clear()
import cv2
import joblib
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from tiatoolbox import logger
from tiatoolbox.models.engine.nucleus_instance_segmentor import NucleusInstanceSegmentor
from tiatoolbox.utils.misc import download_data, imread
# We need this function to visualize the nuclear predictions
from tiatoolbox.utils.visualization import (
    overlay_prediction_contours,
)
from tiatoolbox.wsicore.wsireader import WSIReader
warnings.filterwarnings("ignore")
mpl.rcParams["figure.dpi"] = 300 # for high resolution figure in notebook
mpl.rcParams["figure.facecolor"] = "white" # To make sure text is visible in dark mode
plt.rcParams.update({"font.size": 5})
from tiatoolbox.models import IOPatchPredictorConfig
ioconfig = IOPatchPredictorConfig(
    margin=128,
    tile_shape=[256, 256],
    patch_input_shape=(31, 31),
    stride_shape=(8, 8),
    input_resolutions=[{"resolution": 0.25, "units": "mpp"}],
)
# Instantiate the nucleus instance segmentor
inst_segmentor = NucleusInstanceSegmentor(
    pretrained_model="mapde-conic",
    num_loader_workers=0,
    num_postproc_workers=0,
    batch_size=4,
    auto_generate_mask=False,
    verbose=True,
)
import os
from CoNICmain.misc.utils import rmdir, recur_find_ext
OUT_DIR = '/mnt/nas/Ankana/AI_pred_Ankana/CoNIC_annotated_images/segmented_output/sample_wsi_results/'
wsi_file_name='/mnt/nas/Ankana/AI_pred_Ankana/CoNIC_annotated_images/raw_data/SUSI-493-V0-HE-01-01 - 2024-07-29 17.19.56.ndpi'
rmdir(OUT_DIR)
print(wsi_file_name)
print(os.path.exists(OUT_DIR))
# WSI prediction
# if on_gpu=False, this part will take more than a couple of hours to process.
wsi_output = inst_segmentor.predict(
    [wsi_file_name],
    ioconfig=ioconfig,
    masks=None,
    save_dir=OUT_DIR,
    mode="wsi",
    on_gpu=True,
    crash_on_exception=True,
)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[18], line 3
1 # WSI prediction
2 # if ON_GPU=False, this part will take more than a couple of hours to process.
----> 3 wsi_output = inst_segmentor.predict(
4 [wsi_file_name],
5 ioconfig=ioconfig,
6 masks=None,
7 save_dir=OUT_DIR,
8 mode="wsi",
9 on_gpu=ON_GPU,
10 crash_on_exception=True,
11 )
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/engine/semantic_segmentor.py:1399, in SemanticSegmentor.predict(self, imgs, masks, mode, ioconfig, patch_input_shape, patch_output_shape, stride_shape, resolution, units, save_dir, on_gpu, crash_on_exception)
1396 # ? what will happen if this crash midway?
1397 # => may not be able to retrieve the result dict
1398 for wsi_idx, img_path in enumerate(imgs):
-> 1399 self._predict_wsi_handle_exception(
1400 imgs=imgs,
1401 wsi_idx=wsi_idx,
1402 img_path=img_path,
1403 mode=mode,
1404 ioconfig=ioconfig,
1405 save_dir=save_dir,
1406 crash_on_exception=crash_on_exception,
1407 )
1409 # clean up the cache directories
1410 try:
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/engine/semantic_segmentor.py:1249, in SemanticSegmentor._predict_wsi_handle_exception(self, imgs, wsi_idx, img_path, mode, ioconfig, save_dir, crash_on_exception)
1247 wsi_save_path = save_dir.joinpath(f"{wsi_idx}")
1248 if crash_on_exception:
-> 1249 raise err # noqa: TRY201
1250 logging.exception("Crashed on %s", wsi_save_path)
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/engine/semantic_segmentor.py:1225, in SemanticSegmentor._predict_wsi_handle_exception(self, imgs, wsi_idx, img_path, mode, ioconfig, save_dir, crash_on_exception)
1223 try:
1224 wsi_save_path = save_dir / f"{wsi_idx}"
-> 1225 self._predict_one_wsi(wsi_idx, ioconfig, str(wsi_save_path), mode)
1227 # Do not use dict with file name as key, because it can be
1228 # overwritten. It may be user intention to provide files with a
1229 # same name multiple times (maybe they have different root path)
1230 self._outputs.append([str(img_path), str(wsi_save_path)])
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/engine/nucleus_instance_segmentor.py:743, in NucleusInstanceSegmentor._predict_one_wsi(self, wsi_idx, ioconfig, save_path, mode)
740 tile_patch_outputs = patch_outputs[sel_indices]
741 self._to_shared_space(wsi_idx, tile_patch_inputs, tile_patch_outputs)
--> 743 tile_infer_output = self._infer_once()
745 self._process_tile_predictions(
746 ioconfig,
747 tile_bounds,
(...)
750 tile_infer_output,
751 )
753 self._merge_post_process_results()
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/engine/nucleus_instance_segmentor.py:645, in NucleusInstanceSegmentor._infer_once(self)
638 batch_size = sample_infos.shape[0]
639 # ! depending on the protocol of the output within infer_batch
640 # ! this may change, how to enforce/document/expose this in a
641 # ! sensible way?
642
643 # assume to return a list of L output,
644 # each of shape N x etc. (N=batch size)
--> 645 sample_outputs = self.model.infer_batch(
646 self._model,
647 sample_datas,
648 on_gpu=self._on_gpu,
649 )
650 # repackage so that it's a N list, each contains
651 # L x etc. output
652 sample_outputs = [np.split(v, batch_size, axis=0) for v in sample_outputs]
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/architecture/mapde.py:291, in MapDe.infer_batch(model, batch_data, on_gpu)
288 model.eval() # infer mode
290 with torch.inference_mode():
--> 291 pred = model(patch_imgs_gpu)
293 pred = pred.permute(0, 2, 3, 1).contiguous()
294 pred = pred.cpu().numpy()
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
1516 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1517 else:
-> 1518 return self._call_impl(*args, **kwargs)
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
1522 # If we don't have any hooks, we want to skip the rest of the logic in
1523 # this function, and just call forward.
1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1525 or _global_backward_pre_hooks or _global_backward_hooks
1526 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527 return forward_call(*args, **kwargs)
1529 try:
1530 result = None
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py:183, in DataParallel.forward(self, *inputs, **kwargs)
180 module_kwargs = ({},)
182 if len(self.device_ids) == 1:
--> 183 return self.module(*inputs[0], **module_kwargs[0])
184 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
185 outputs = self.parallel_apply(replicas, inputs, module_kwargs)
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/torch/nn/modules/module.py:1518, in Module._wrapped_call_impl(self, *args, **kwargs)
1516 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1517 else:
-> 1518 return self._call_impl(*args, **kwargs)
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/torch/nn/modules/module.py:1527, in Module._call_impl(self, *args, **kwargs)
1522 # If we don't have any hooks, we want to skip the rest of the logic in
1523 # this function, and just call forward.
1524 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1525 or _global_backward_pre_hooks or _global_backward_hooks
1526 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1527 return forward_call(*args, **kwargs)
1529 try:
1530 result = None
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/architecture/mapde.py:228, in MapDe.forward(self, input_tensor)
213 def forward(self: MapDe, input_tensor: torch.Tensor) -> torch.Tensor:
214 """Logic for using layers defined in init.
215
216 This method defines how layers are used in forward operation.
(...)
226
227 """
--> 228 logits, _, _, _ = super().forward(input_tensor)
229 out = F.conv2d(logits, self.dist_filter, padding="same")
230 return F.relu(out)
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/architecture/micronet.py:533, in MicroNet.forward(self, input_tensor)
515 def forward( # skipcq: PYL-W0221
516 self: MicroNet,
517 input_tensor: torch.Tensor,
518 ) -> list[torch.Tensor, torch.Tensor, torch.Tensor]:
519 """Logic for using layers defined in init.
520
521 This method defines how layers are used in forward operation.
(...)
531
532 """
--> 533 b1 = group1_forward_branch(
534 self.layer["b1"],
535 input_tensor,
536 functional.interpolate(input_tensor, size=(128, 128), mode="bicubic"),
537 )
538 b2 = group1_forward_branch(
539 self.layer["b2"],
540 b1,
541 functional.interpolate(input_tensor, size=(64, 64), mode="bicubic"),
542 )
543 b3 = group1_forward_branch(
544 self.layer["b3"],
545 b2,
546 functional.interpolate(input_tensor, size=(32, 32), mode="bicubic"),
547 )
File /mnt/nas/Ankana/my_jupyter_env/lib/python3.10/site-packages/tiatoolbox/models/architecture/micronet.py:49, in group1_forward_branch(layer, in_tensor, resized_feat)
47 b = layer["conv3"](resized_feat)
48 b = layer["conv4"](b)
---> 49 return torch.cat(tensors=(a, b), dim=1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 13 but got size 124 for tensor number 1 in the list.
The NucleusInstanceSegmentor is not designed for nucleus detection models. We started working on the detection engine in #538, but this has been postponed as we are redesigning the engines from scratch in #578.
For now, you can use the code from #538 to work out a solution.
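Until that engine is available, something along the following lines may serve as a rough starting point for single-patch inference with the mapde-conic weights. This is only a sketch, not a supported workflow: it assumes that get_pretrained_model returns the model together with the ioconfig bundled with the weights, that this ioconfig exposes patch_input_shape and input_resolutions as in the configs above, and it reuses the wsi_file_name variable from your snippet.

# Minimal sketch (assumption-laden, not a supported workflow): run the
# mapde-conic weights on a single patch by hand while the dedicated
# detection engine is still in progress.
import numpy as np
import torch

from tiatoolbox.models.architecture import get_pretrained_model
from tiatoolbox.wsicore.wsireader import WSIReader

# Assumption: get_pretrained_model returns (model, ioconfig), where ioconfig
# describes the patch shape and resolution the weights were trained with.
model, ioconfig = get_pretrained_model("mapde-conic")

reader = WSIReader.open(wsi_file_name)

# Read one patch at the model's expected resolution; (0, 0) is just a
# placeholder location for illustration.
resolution = ioconfig.input_resolutions[0]
patch = reader.read_rect(
    location=(0, 0),
    size=tuple(ioconfig.patch_input_shape),
    resolution=resolution["resolution"],
    units=resolution["units"],
)

# infer_batch takes a batch of patches in NHWC order (see the traceback above)
# and returns the predicted detection map for each patch.
batch = torch.from_numpy(np.expand_dims(patch, axis=0))
pred_map = model.infer_batch(model, batch, on_gpu=True)

The returned map still has to be post-processed into nucleus coordinates and merged across tiles, which is exactly what the engine in #538 adds on top of this.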
Is the code correct for what I am trying to do?
Similarly, when I did [...], I am getting another error.