diff --git a/.github/workflows/run_tests_os.yml b/.github/workflows/run_tests_os.yml index f7b7d6bb2..823016477 100644 --- a/.github/workflows/run_tests_os.yml +++ b/.github/workflows/run_tests_os.yml @@ -8,8 +8,8 @@ jobs: run-tests: strategy: matrix: - os: [ubuntu-latest, windows-latest, macos-latest] # fails on windows until https://github.com/MIC-DKFZ/nnUNet/issues/2396 is resolved - # os: [ubuntu-latest, macos-latest] + # os: [ubuntu-latest, windows-latest, macos-latest] # fails on windows until https://github.com/MIC-DKFZ/nnUNet/issues/2396 is resolved + os: [ubuntu-latest, macos-latest] python-version: ["3.10"] runs-on: ${{ matrix.os }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 91e772eb7..89e00d8bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ * add `totalseg_get_modality` * change pi_time threshold for arterial late phase from 50s to 60s * add oculomotor muscles model +* removed `rt_utils` and `p_tqdm` dependencies + ## Release 2.4.0 * add brain structures diff --git a/resources/train_nnunet.md b/resources/train_nnunet.md index 320d815fc..86962ece7 100644 --- a/resources/train_nnunet.md +++ b/resources/train_nnunet.md @@ -6,7 +6,7 @@ 4. Preprocess `nnUNetv2_plan_and_preprocess -d -pl ExperimentPlanner -c 3d_fullres -np 2` 5. Train `nnUNetv2_train 3d_fullres 0 -tr nnUNetTrainerNoMirroring` (takes several days) 6. Predict test set `nnUNetv2_predict -i path/to/imagesTs -o path/to/labelsTs_predicted -d -c 3d_fullres -tr nnUNetTrainerNoMirroring --disable_tta -f 0` -7. Evaluate `python resources/evaluate.py path/to/labelsTs path/to/labelsTs_predicted` (requires `pip install git+https://github.com/google-deepmind/surface-distance.git`). The resulting numbers should be similar to the ones in `resources/evaluate_results.txt` (since training is not deterministic the mean dice score across all classes can vary by up to one dice point) +7. 
Evaluate `python resources/evaluate.py path/to/labelsTs path/to/labelsTs_predicted` (requires `pip install git+https://github.com/google-deepmind/surface-distance.git` and `pip install p_tqdm`). The resulting numbers should be similar to the ones in `resources/evaluate_results.txt` (since training is not deterministic the mean dice score across all classes can vary by up to one dice point) 8. Done > Note: This will not give you the same results as TotalSegmentator for two reasons: diff --git a/setup.py b/setup.py index c0321a543..aec779d6d 100644 --- a/setup.py +++ b/setup.py @@ -22,12 +22,10 @@ 'SimpleITK', 'nibabel>=2.3.0', 'tqdm>=4.45.0', - 'p_tqdm', 'xvfbwrapper', 'nnunetv2>=2.2.1', 'requests==2.27.1;python_version<"3.10"', 'requests;python_version>="3.10"', - 'rt_utils', 'dicom2nifti', 'pyarrow' ], diff --git a/totalsegmentator/nnunet.py b/totalsegmentator/nnunet.py index 627f490c5..2b1d12109 100644 --- a/totalsegmentator/nnunet.py +++ b/totalsegmentator/nnunet.py @@ -18,7 +18,7 @@ import numpy as np import nibabel as nib from nibabel.nifti1 import Nifti1Image -from p_tqdm import p_map +# from p_tqdm import p_map import torch from totalsegmentator.libs import nostdout @@ -666,21 +666,22 @@ def nnUNet_predict_image(file_in: Union[str, Path, Nifti1Image], file_out, task_ if nora_tag != "None": subprocess.call(f"/opt/nora/src/node/nora -p {nora_tag} --add {output_path} --addtag mask", shell=True) else: + nib.save(img_pred, tmp_dir / "s01.nii.gz") # needed inside of threads + # Code for multithreaded execution # Speed with different number of threads: # 1: 46s, 2: 24s, 6: 11s, 10: 8s, 14: 8s - nib.save(img_pred, tmp_dir / "s01.nii.gz") - _ = p_map(partial(save_segmentation_nifti, tmp_dir=tmp_dir, file_out=file_out, nora_tag=nora_tag, header=new_header, task_name=task_name, quiet=quiet), - selected_classes.items(), num_cpus=nr_threads_saving, disable=quiet) + # _ = p_map(partial(save_segmentation_nifti, tmp_dir=tmp_dir, file_out=file_out, nora_tag=nora_tag, 
header=new_header, task_name=task_name, quiet=quiet), + # selected_classes.items(), num_cpus=nr_threads_saving, disable=quiet) # Multihreaded saving with same functions as in nnUNet -> same speed as p_map - # pool = Pool(nr_threads_saving) - # results = [] - # for k, v in selected_classes.items(): - # results.append(pool.starmap_async(save_segmentation_nifti, ((k, v, tmp_dir, file_out, nora_tag),) )) - # _ = [i.get() for i in results] # this actually starts the execution of the async functions - # pool.close() - # pool.join() + pool = Pool(nr_threads_saving) + results = [] + for k, v in selected_classes.items(): + results.append(pool.starmap_async(save_segmentation_nifti, [((k, v), tmp_dir, file_out, nora_tag, new_header, task_name, quiet)])) + _ = [i.get() for i in results] # this actually starts the execution of the async functions + pool.close() + pool.join() if not quiet: print(f" Saved in {time.time() - st:.2f}s") # Postprocessing single files diff --git a/totalsegmentator/python_api.py b/totalsegmentator/python_api.py index cf3acec3e..1a191c62a 100644 --- a/totalsegmentator/python_api.py +++ b/totalsegmentator/python_api.py @@ -98,6 +98,12 @@ def totalsegmentator(input: Union[str, Path, Nifti1Image], output: Union[str, Pa validate_device_type_api(device) device = convert_device_to_cuda(device) + if output_type == "dicom": + try: + from rt_utils import RTStructBuilder + except ImportError: + raise ImportError("rt_utils is required for output_type='dicom'. Please install it with 'pip install rt_utils'.") + # available devices: gpu | cpu | mps | gpu:1, gpu:2, etc. 
if device == "gpu": device = "cuda" diff --git a/totalsegmentator/statistics.py b/totalsegmentator/statistics.py index fda15e96d..5d435a9e9 100644 --- a/totalsegmentator/statistics.py +++ b/totalsegmentator/statistics.py @@ -10,7 +10,6 @@ import nibabel as nib from nibabel.nifti1 import Nifti1Image from tqdm import tqdm -from p_tqdm import p_map import numpy.ma as ma from totalsegmentator.map_to_binary import class_map @@ -58,8 +57,7 @@ def get_radiomics_features(seg_file, img_file="ct.nii.gz"): def get_radiomics_features_for_entire_dir(ct_file:Path, mask_dir:Path, file_out:Path): masks = sorted(list(mask_dir.glob("*.nii.gz"))) - stats = p_map(partial(get_radiomics_features, img_file=ct_file), - masks, num_cpus=1, disable=False) + stats = [get_radiomics_features(mask, img_file=ct_file) for mask in masks] stats = {mask_name: stats for mask_name, stats in stats} with open(file_out, "w") as f: json.dump(stats, f, indent=4)