1 parent b842ffa · commit cea88d1 · 1 changed file with 122 additions and 0 deletions.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,122 @@ | ||
{ | ||
"cells": [ | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 5, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"Initializating...\n" | ||
] | ||
}, | ||
{ | ||
"ename": "ModuleNotFoundError", | ||
"evalue": "No module named 'torch'", | ||
"output_type": "error", | ||
"traceback": [ | ||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", | ||
"\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", | ||
"\u001b[1;32mD:\\muriel\\Projects\\NNS\\nnseg.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpath\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0misfile\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0misdir\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mjoin\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbasename\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmath\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 7\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 8\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnn\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 9\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfunctional\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", | ||
"\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'torch'" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"run nnseg.py" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": { | ||
"id": "NbMDvoZ-vRiu" | ||
}, | ||
"outputs": [], | ||
"source": [ | ||
"#Before starting, make sure you have these packages installed (Google Colab comes with all of these pre-installed):\n", | ||
"#torch\n", | ||
"#torchvision\n", | ||
"#numpy\n", | ||
"#matplotlib\n", | ||
"#zipfile\n", | ||
"#cv2\n", | ||
"#PIL\n", | ||
"#scipy\n", | ||
"#skimage\n", | ||
"#sklearn\n", | ||
"#tqdm\n", | ||
"#tifffile\n", | ||
"#time\n", | ||
"\n", | ||
"#To initialize the program we have to import the code from github for example and run it:\n", | ||
"#!git clone https://github.com/user/repository.git\n", | ||
"%run -i 'nnseg.py'\n", | ||
"\n", | ||
"#The neighbor distance prediction (ndp) and cell distance prediction (cdp) are separate U-Net based neural networks. \n", | ||
"#With the following code we can load either the original model from the paper (Scherr et al., 2020)) or any trained model.\n", | ||
"#load_model needs 2 arguments. The first argument is the U-Net class with one of the two neural networks as argument. Example: UNet(\"ndp\")\n", | ||
"#The second argument is the path of the model you want to load in that chosen neural network. These models (or array of weights) have a .pt or .pth extension.\n", | ||
"#If you want to load in the original model from the paper, both ndp and cdp will need the same model: \"cell_seg_model_001.pth\"\n", | ||
"#If you want to load trained models, those models are most likely separate for ndp and cdp: \"ndp_model.pt\" \"cdp_model.pt\"\n", | ||
"ndp_model = load_model(UNet(\"ndp\"),\"/content/cell_seg_model_001.pth\") #or ndp_model = load_model(UNet(\"ndp\"),\"/content/ndp_model.pt\")\n", | ||
"cdp_model = load_model(UNet(\"cdp\"),\"/content/cell_seg_model_001.pth\") #or cdp_model = load_model(UNet(\"cdp\"),\"/content/cdp_model.pt\")\n", | ||
"\n", | ||
"#Generating new models can be done with the following command:\n", | ||
"#train_models(input_dir, segmentation_dir, epochs, batch_size)\n", | ||
"#input_dir = path of the directory with the original images such as \"/content/input/\". Original images need to be tiff files. \n", | ||
"#Make sure that the original images are sorted in the same order as the segmentation images. \n", | ||
"#If you want to make sure this is the case, segmentation files need to be named exactly like their original counterparts plus a surfix, such as \"image1_seg.tiff\".\n", | ||
"#segmentation_dir = path of the directory with the segmented images such as \"/content/input/\". Segmentation images need to be tiff files where each cell is given a unique grey value.\n", | ||
"#epochs = amount of epochs the training must run. Lower this when training needs to be done quicker, higher when you really want to make sure you got the best model out of the training. 100 epochs should be enough in most cases.\n", | ||
"#batch_size = the size of the batch during training. Higher means faster training, but also uses more Vram. Lower too use less Vram. 10 should be doable in google colab.\n", | ||
"#Training can take up to 2,5 hours per model, thus 5 hours. (with epochs = 200, batch_size = 10)\n", | ||
"train_models(\"/content/input/\",\"/content/segmentation/\", 100, 10)\n", | ||
"\n", | ||
"#Prediction of the segmentation will be done with the loaded models or with the trained models, which ever was last.\n", | ||
"#predict(input_dir, output_dir, tmask, tmarker)\n", | ||
"#input_dir = path of the directory where all the original images are that you want to segment.\n", | ||
"#output_dir = path of the directory where you want the output zip file to be generated. Will create the directory if not existing.\n", | ||
"#tmask = threshold value for generating the binary mask. A lower threshold will force the binary mask to be more inclusive. However, this may also lead to undersegmentation between cells. 0.15 is a good starting point.\n", | ||
"#tmarker = threshold value for generating the seeds. A higher threshold will force smaller seeds and improve segmentation between cells (division). 0.5 is a good starting point.\n", | ||
"#However, a too stringent threshold will filter out seeds and therefore filter out cells in the end result.\n", | ||
"#The output is given as a zip file of all the segmented images.\n", | ||
"#The function returns prediction data for use with the example function\n", | ||
"prediction = predict(\"/content/predict/\", \"/content/output/\", 0.15, 0.5)\n", | ||
"\n", | ||
"#The example command can be used after prediction to preview the segmentation of a certain image and to get more insight into the process.\n", | ||
"#It needs the prediction data as returned by the predict function.\n", | ||
"#example(index)\n", | ||
"#index = the index of the image you want to view. Starts at 0 and has the same order as your original input.\n", | ||
"example(1)\n" | ||
] | ||
} | ||
], | ||
"metadata": { | ||
"colab": { | ||
"name": "Untitled0.ipynb", | ||
"provenance": [] | ||
}, | ||
"kernelspec": { | ||
"display_name": "Python 3", | ||
"language": "python", | ||
"name": "python3" | ||
}, | ||
"language_info": { | ||
"codemirror_mode": { | ||
"name": "ipython", | ||
"version": 3 | ||
}, | ||
"file_extension": ".py", | ||
"mimetype": "text/x-python", | ||
"name": "python", | ||
"nbconvert_exporter": "python", | ||
"pygments_lexer": "ipython3", | ||
"version": "3.8.5" | ||
} | ||
}, | ||
"nbformat": 4, | ||
"nbformat_minor": 1 | ||
} |