
Commit 01f24f7

fix #31 : generate docstrings (#36)
1 parent 7d586bf commit 01f24f7

File tree

3 files changed: +102 -102 lines changed


devolearn/embryo_generator_model/embryo_generator_model.py

Lines changed: 25 additions & 28 deletions
@@ -27,6 +27,13 @@
 
 class Generator(nn.Module):
     def __init__(self, ngf, nz, nc):
+        """GAN generator to generate synthetic images of embryos
+
+        Args:
+            ngf (int): size of feature maps in generator
+            nz (int): size of latent space noise (latent vector)
+            nc (int): number of color channels of the output image
+        """
         super().__init__()
         self.main = nn.Sequential(
             # input is Z, going into a convolution
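
For orientation, a minimal sketch of how this Generator could be exercised on its own. The import path is assumed from the file layout above, and the ngf/nz/nc values mirror the defaults used later in this file (self.ngf = 128 and a noise tensor of shape [1, 128, 1, 1]); none of this is part of the commit itself.

import torch
from devolearn.embryo_generator_model.embryo_generator_model import Generator  # path assumed from repo layout

# Assumed hyperparameters, mirroring the defaults used elsewhere in this file;
# nc = 1 because generate() below returns a 1 channel image.
netG = Generator(ngf=128, nz=128, nc=1)
netG.eval()

noise = torch.randn(1, 128, 1, 1)    # latent vector z with shape (batch, nz, 1, 1)
with torch.no_grad():
    fake = netG(noise)               # synthetic embryo image tensor (nc channels)
print(fake.shape)
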
@@ -65,13 +72,11 @@ def forward(self, input):
 
 class embryo_generator_model():
     def __init__(self, device = "cpu"):
-
-        """
-        ngf = size of feature maps in generator
-        nz = size of latent space noise (latent vector)
-        nc = number of color channels of the output image
+        """Generate synthetic single or multiple images of embryos.
 
         Do not tweak these unless you're changing the Generator() with a new model with a different architecture.
-
+
+        Args:
+            device (str, optional): set to "cuda" to run operations on the GPU, or to "cpu" to run them on the CPU. Defaults to "cpu".
         """
         self.device = device
         self.ngf = 128 ## generated image size
@@ -103,19 +108,17 @@ def __init__(self, device = "cpu"):
 
 
     def generate(self, image_size = (700,500)):
-
-        """
+        """Generate one synthetic image of an embryo.
+        The native size of the GAN's output is 256*256; the generated image is then resized to the desired size.
         reference{
             https://github.com/DevoLearn/devolearn#generating-synthetic-images-of-embryos-with-a-pre-trained-gan
         }
-        inputs{
-            image_size <tuple> = (width,height of the generated image)
-        }
-        outputs{
-            1 channel image as an <np.array>
-        }
-        The native size of the GAN's output is 256*256, and then it resizes the
-        generated image to the desired size.
+
+        Args:
+            image_size (tuple, optional): size of the generated image as (width, height). Defaults to (700,500).
+
+        Returns:
+            np.array : 1 channel image
         """
         with torch.no_grad():
             noise = torch.randn([1,128,1,1]).to(self.device)
@@ -127,21 +130,15 @@ def generate(self, image_size = (700,500)):
 
 
     def generate_n_images(self, n = 3, foldername = "generated_images", image_size = (700,500), notebook_mode = False):
-        """
+        """This is an extension of the generate() function for generating multiple images at once and saving them into a folder.
         reference{
             https://github.com/DevoLearn/devolearn#generating-synthetic-images-of-embryos-with-a-pre-trained-gan
         }
-        inputs{
-            n <int> = number of images to generate
-            foldername <str> = name of the folder where the images whould be saved.
-                               The function automatically generates a folder if it doesn't exist
-            notebook_mode <bool> = toogle between script(False) and notebook(True), for better user interface
-        }
-        outputs{
-            None
-        }
-
-        This is an extension of the generator.generate() function for generating multiple images at once and saving them into a folder.
+        Args:
+            n (int, optional): number of images to generate. Defaults to 3.
+            foldername (str, optional): name of the folder where the images would be saved. The function automatically creates the folder if it doesn't exist. Defaults to "generated_images".
+            image_size (tuple, optional): size of the generated image as (width, height). Defaults to (700,500).
+            notebook_mode (bool, optional): toggle between script (False) and notebook (True), for a better user interface. Defaults to False.
         """
 
         if os.path.isdir(foldername) == False:
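
A hedged usage sketch of the two generator methods documented above, following the new docstrings; the import path is assumed from the repository layout and is not part of this commit.

from devolearn.embryo_generator_model.embryo_generator_model import embryo_generator_model  # path assumed

generator = embryo_generator_model(device="cpu")

# one synthetic embryo as a 1 channel np.array, resized from 256*256 to (700, 500)
image = generator.generate(image_size=(700, 500))

# several images saved into a folder; the folder is created if it doesn't exist
generator.generate_n_images(n=3, foldername="generated_images", image_size=(700, 500))
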

devolearn/embryo_segmentor/embryo_segmentor.py

Lines changed: 40 additions & 25 deletions
@@ -25,19 +25,14 @@
 """
 
 def generate_centroid_image(thresh):
-
-    """
-    used when centroid_mode == True
-
-    input{
-        thresh <np.array> = 2d numpy array that is returned from the segmentation model
-    }
-    outputs{
-        centroid_image = image containing the contours and their respective centroids
-        centroids = list of all centroids for the given image as [(x1,y1), (x2,y2)...]
-    }
+    """Used when centroid_mode is set to True.
+
+    Args:
+        thresh (np.array): 2d numpy array that is returned from the segmentation model
+
+    Returns:
+        np.array : image containing the contours and their respective centroids
+        list : list of all centroids for the given image as [(x1,y1), (x2,y2)...]
     """
 
     thresh = cv2.blur(thresh, (5,5))
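
A small sketch of how generate_centroid_image() could be called directly, assuming the return values are ordered as in the docstring (image first, centroid list second); the synthetic mask below is a hypothetical stand-in for the segmentation model's thresholded output.

import cv2
import numpy as np
from devolearn.embryo_segmentor.embryo_segmentor import generate_centroid_image  # path assumed

# hypothetical binary mask standing in for the model's prediction
thresh = np.zeros((256, 256), dtype=np.uint8)
cv2.circle(thresh, (128, 128), 40, 255, -1)   # one filled blob

centroid_image, centroids = generate_centroid_image(thresh)
print(centroids)   # expected to be close to [(128, 128)] for a single centered blob
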
@@ -63,11 +58,11 @@ def generate_centroid_image(thresh):
 
 class embryo_segmentor():
     def __init__(self, device = "cpu"):
-
-        """
-        Segments the c. elegans embryo from images/videos,
+        """Segments the C. elegans embryo from images/videos,
         depends on segmentation-models-pytorch for the model backbone
+
+        Args:
+            device (str, optional): set to "cuda" to run operations on the GPU, or to "cpu" to run them on the CPU. Defaults to "cpu".
         """
         self.device = device
         self.ENCODER = 'resnet18'
@@ -110,20 +105,23 @@ def __init__(self, device = "cpu"):
 
 
     def predict(self, image_path, pred_size = (350,250), centroid_mode = False):
-
-        """
+        """Loads an image from image_path and converts it to grayscale,
+        then passes it through the model and returns the segmented image (and optionally the centroids of the segmented features).
         reference{
             https://github.com/DevoLearn/devolearn#segmenting-the-c-elegans-embryo
-        }
-        inputs{
-            image_path <str> = path to image
-            pred_size <tuple> = (width,height) of the image to be returned, the default size of the model output is (256,256)
-            centroid_mode <bool> = set to true to return both the segmented image and the list of centroids
-        }
-        outputs{
-            1 channel image as an <np.array>
-            optional <list> containing centroids
         }
+
+        Args:
+            image_path (str): path to image
+            pred_size (tuple, optional): size of the output image as (width, height). Defaults to (350,250).
+            centroid_mode (bool, optional): set to True to return both the segmented image and the list of centroids. Defaults to False.
+
+        Returns:
+            centroid_mode set to False:
+                np.array : 1 channel image.
+            centroid_mode set to True:
+                np.array : 1 channel image,
+                list : list of centroids.
         """
 
         im = cv2.imread(image_path,0)
@@ -138,6 +136,23 @@ def predict(self, image_path, pred_size = (350,250), centroid_mode = False):
 
 
     def predict_from_video(self, video_path, pred_size = (350,250), save_folder = "preds", centroid_mode = False, notebook_mode = False):
+        """Splits a video from video_path into frames and passes the
+        frames through the model for predictions. Saves the predicted images in save_folder
+        and optionally collects all the centroid predictions into a pandas.DataFrame.
+
+        Args:
+            video_path (str): path to the video file.
+            pred_size (tuple, optional): size of the output image as (width, height). Defaults to (350,250).
+            save_folder (str, optional): path to the folder the predictions are saved in. Defaults to "preds".
+            centroid_mode (bool, optional): set to True to return both the segmented image and the list of centroids. Defaults to False.
+            notebook_mode (bool, optional): toggle between script (False) and notebook (True), for a better user interface. Defaults to False.
+
+        Returns:
+            centroid_mode set to True:
+                pd.DataFrame : DataFrame containing the file names and their centroids
+            centroid_mode set to False:
+                list : list containing the names of the entries in the save_folder directory
+        """
         vidObj = cv2.VideoCapture(video_path)
         success = 1
         images = deque()
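
For context, a hedged usage sketch of the segmentor methods documented in this file; the import path and the input file names are assumptions, not part of this commit.

from devolearn.embryo_segmentor.embryo_segmentor import embryo_segmentor  # path assumed

segmentor = embryo_segmentor(device="cpu")

# single image: a 1 channel np.array, optionally with the list of centroids
seg = segmentor.predict(image_path="embryo.png", pred_size=(350, 250))
seg, centroids = segmentor.predict(image_path="embryo.png", centroid_mode=True)

# whole video: per-frame predictions saved into save_folder
entries = segmentor.predict_from_video(video_path="embryo.mp4", save_folder="preds")
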

devolearn/lineage_population_model/lineage_population_model.py

Lines changed: 37 additions & 49 deletions
@@ -25,7 +25,11 @@
 
 class lineage_population_model():
     def __init__(self, device = "cpu"):
-
+        """Estimate lineage populations of C. elegans embryos from videos/photos and plot the predictions.
+
+        Args:
+            device (str, optional): set to "cuda" to run operations on the GPU, or to "cpu" to run them on the CPU. Defaults to "cpu".
+        """
         self.device = device
         self.model = models.resnet18(pretrained = True)
         self.model.fc = nn.Linear(512, 7) ## resize last layer
@@ -57,25 +61,20 @@ def __init__(self, device = "cpu"):
     ])
 
     def predict(self, image_path):
+        """Loads an image from image_path and converts it to grayscale,
+        then passes it through the model and returns a dictionary
+        with the scaled output (see self.scaler)
 
-        """
         reference{
             https://github.com/DevoLearn/devolearn#predicting-populations-of-cells-within-the-c-elegans-embryo
-        }
-        input{
-            image path <str>
         }
 
-        output{
-            dictionary containing the cell population values <dict>
-        }
-
-        Loads an image from image_path and converts it to grayscale,
-        then passes it though the model and returns a dictionary
-        with the scaled output (see self.scaler)
+        Args:
+            image_path (str): path to image.
+
+        Returns:
+            dict: dictionary containing the cell population values
         """
-
         image = cv2.imread(image_path, 0)
         image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
         tensor = self.transforms(image).unsqueeze(0).to(self.device)
@@ -96,29 +95,23 @@ def predict(self, image_path):
 
         return pred_dict
 
     def predict_from_video(self, video_path, csv_name = "foo.csv", save_csv = False, ignore_first_n_frames = 0, ignore_last_n_frames = 0, notebook_mode = False):
-
-        """
-        inputs{
-            video path <str> = path to video file
-            csv_name <str> = filename to be used to save the predictions
-            save_csv <bool> = set to True if you want to save the predictions into a CSV files
-            ignore_first_n_frames <int> = number of frames to drop in the start of the video
-            ignore_last_n_frames <int> = number of frames to drop in the end of the video
-            notebook_mode <bool> = toogle between script(False) and notebook(True), for better user interface
-        }
-
-        output{
-            DataFrame containing all the preds with the corresponding column name <pandas.DataFrame>
-        }
-
-        Splits a video from video_path into frames and passes the
+        """Splits a video from video_path into frames and passes the
         frames through the model for predictions. Saves all the predictions
         into a pandas.DataFrame which can be optionally saved as a CSV file.
 
         The model was trained to make predictions upto the
-        stage where the population of "A" lineage is 250
-
+        stage where the population of the "A" lineage is 250.
+
+        Args:
+            video_path (str): path to video file
+            csv_name (str, optional): filename to be used to save the predictions. Defaults to "foo.csv".
+            save_csv (bool, optional): set to True if you want to save the predictions into a CSV file. Defaults to False.
+            ignore_first_n_frames (int, optional): number of frames to drop at the start of the video. Defaults to 0.
+            ignore_last_n_frames (int, optional): number of frames to drop at the end of the video. Defaults to 0.
+            notebook_mode (bool, optional): toggle between script (False) and notebook (True), for a better user interface. Defaults to False.
+
+        Returns:
+            pandas.DataFrame : DataFrame containing all the predictions with the corresponding column names
         """
         A_population_upper_limit = 250

@@ -180,23 +173,18 @@ def predict_from_video(self, video_path, csv_name = "foo.csv", save_csv = False
 
 
     def create_population_plot_from_video(self, video_path, save_plot = False, plot_name = "plot.png", ignore_first_n_frames = 0, ignore_last_n_frames = 0, notebook_mode = False):
-
-        """
-        inputs{
-            video_path <str> = path to video file
-            save_plot <bool> = set to True to save the plot as an image file
-            plot_name <str> = filename of the plot image to be saved
-            ignore_first_n_frames <int> = number of frames to drop in the start of the video
-            ignore_last_n_frames <int> = number of frames to drop in the end of the video
-            notebook_mode <bool> = toogle between script(False) and notebook(True), for better user interface
-        }
-
-        outputs{
-            plot object which can be customized further <matplotlib.pyplot>
-        }
-
-        plots all the predictions from a video into a matplotlib.pyplot
-
+        """Plots all the predictions from a video into a matplotlib.pyplot.
+
+        Args:
+            video_path (str): path to video file
+            save_plot (bool, optional): set to True to save the plot as an image file. Defaults to False.
+            plot_name (str, optional): filename of the plot image to be saved. Defaults to "plot.png".
+            ignore_first_n_frames (int, optional): number of frames to drop at the start of the video. Defaults to 0.
+            ignore_last_n_frames (int, optional): number of frames to drop at the end of the video. Defaults to 0.
+            notebook_mode (bool, optional): toggle between script (False) and notebook (True), for a better user interface. Defaults to False.
+
+        Returns:
+            matplotlib.pyplot : plot object which can be customized further
         """
         df = self.predict_from_video(video_path, ignore_first_n_frames = ignore_first_n_frames, ignore_last_n_frames = ignore_last_n_frames, notebook_mode = notebook_mode)
 
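Finally, a hedged usage sketch tying together the three lineage-population methods documented above; the import path and file names are assumptions, not part of this commit.

from devolearn.lineage_population_model.lineage_population_model import lineage_population_model  # path assumed

model = lineage_population_model(device="cpu")

# per-image prediction: dict of scaled cell population values
populations = model.predict(image_path="embryo.png")

# per-video prediction: pandas.DataFrame, optionally saved as a CSV
df = model.predict_from_video(video_path="embryo_timelapse.mp4", save_csv=True, csv_name="foo.csv")

# plot of the per-frame predictions; returns a matplotlib.pyplot object
plot = model.create_population_plot_from_video(video_path="embryo_timelapse.mp4", save_plot=True)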