Commit 9ae415e6 authored by Lamping, Christian's avatar Lamping, Christian
Browse files


parent b446189a
......@@ -568,7 +568,7 @@ def display_images(images, titles=None, cols=4, cmap=None, norm=None,
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
image = np.array(images)
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(14, 14 * rows // cols))
......@@ -698,6 +698,25 @@ def display_instances(image, boxes, masks, class_ids, class_names,
if auto_show:
def pred_images_from_folder(directory):
    """Run the (externally defined) model on every .png image in *directory*
    and render the predicted instance masks.

    NOTE(review): this block was recovered from a rendered diff page and is
    NOT syntactically valid as-is — several lines were truncated or merged
    by the diff extraction (see inline notes). It must be reconciled with
    the repository version before it can run.

    Relies on names from the enclosing module that are not visible here:
    ``model``, ``threshold``, ``dataset_train``, ``resize_image``,
    ``get_prediction``, ``display_instances``, ``os``, ``np``, ``torchvision``.
    """
    for filename in os.listdir(directory):
        # Only .png files are processed; everything else is silently skipped.
        if filename.endswith(".png"):
            print(os.path.join(directory, filename))
            path = os.path.join(directory, filename)
            # ToTensor converts an HxWxC uint8 image to a CxHxW float tensor in [0, 1].
            augs = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
            # NOTE(review): garbled by diff extraction — presumably something
            # like np.array(Image.open(path).convert("RGB")); the unbalanced
            # paren makes this line a syntax error. Confirm against the repo.
            img = np.array("RGB"))
            # NOTE(review): truncated call — arguments and closing paren are
            # missing; these two lines look like removed-revision residue
            # interleaved with the new code by the diff view.
            img, window, scale, padding, crop = resize_image(
                img, [img.shape[1], img.shape[0]],
            img = augs(img)
            # model / threshold come from enclosing scope — not visible here.
            pred_mask, pred_boxes, class_ids, pred_score = get_prediction(model, img, threshold)
            # img.mul(255) undoes the ToTensor scaling; transpose(1,2,0)
            # converts CxHxW back to HxWxC for display.
            masked_image = display_instances(np.array(img.mul(255)).transpose(1,2,0), pred_boxes, pred_mask, class_ids, dataset_train.class_names, auto_show=False)
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment