instance_segmentation_base

InstanceSegmentationBaseOnnxRoboflowInferenceModel

Bases: OnnxRoboflowInferenceModel

Roboflow ONNX Instance Segmentation model.

This class implements an instance segmentation specific inference method for ONNX models provided by Roboflow.
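
A quick usage sketch, for orientation: the loader import path, model ID, and API key below are illustrative placeholders, not guarantees of this base class; any concrete subclass exposes the same infer interface.

# Illustrative usage sketch -- the loader location, model ID, and API key are
# placeholder assumptions, not part of this base class's contract.
from inference.models.utils import get_model

model = get_model(model_id="my-project/1", api_key="MY_API_KEY")

# `image` may be a BGR numpy array, filepath, PIL Image, byte string, etc.
predictions = model.infer(
    "path/to/image.jpg",
    confidence=0.5,
    iou_threshold=0.5,
)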

Source code in inference/core/models/instance_segmentation_base.py
class InstanceSegmentationBaseOnnxRoboflowInferenceModel(OnnxRoboflowInferenceModel):
    """Roboflow ONNX Instance Segmentation model.

    This class implements an instance segmentation specific inference method
    for ONNX models provided by Roboflow.
    """

    task_type = "instance-segmentation"
    num_masks = 32

    def infer(
        self,
        image: Any,
        class_agnostic_nms: bool = False,
        confidence: float = DEFAULT_CONFIDENCE,
        disable_preproc_auto_orient: bool = False,
        disable_preproc_contrast: bool = False,
        disable_preproc_grayscale: bool = False,
        disable_preproc_static_crop: bool = False,
        iou_threshold: float = DEFAULT_IOU_THRESH,
        mask_decode_mode: str = DEFAULT_MASK_DECODE_MODE,
        max_candidates: int = DEFAULT_MAX_CANDIDATES,
        max_detections: int = DEFAUlT_MAX_DETECTIONS,
        return_image_dims: bool = False,
        tradeoff_factor: float = DEFAULT_TRADEOFF_FACTOR,
        **kwargs,
    ) -> Union[PREDICTIONS_TYPE, Tuple[PREDICTIONS_TYPE, List[Tuple[int, int]]]]:
        """
        Process an image or list of images for instance segmentation.

        Args:
            image (Any): An image or a list of images for processing.
                - can be a BGR numpy array, filepath, InferenceRequestImage, PIL Image, byte-string, etc.
            class_agnostic_nms (bool, optional): Whether to use class-agnostic non-maximum suppression. Defaults to False.
            confidence (float, optional): Confidence threshold for predictions. Defaults to 0.5.
            iou_threshold (float, optional): IoU threshold for non-maximum suppression. Defaults to 0.5.
            mask_decode_mode (str, optional): Decoding mode for masks. Choices are "accurate", "tradeoff", and "fast". Defaults to "accurate".
            max_candidates (int, optional): Maximum number of candidate detections. Defaults to 3000.
            max_detections (int, optional): Maximum number of detections after non-maximum suppression. Defaults to 300.
            return_image_dims (bool, optional): Whether to return the dimensions of the processed images. Defaults to False.
            tradeoff_factor (float, optional): Tradeoff factor used when `mask_decode_mode` is set to "tradeoff". Must be in [0.0, 1.0]. Defaults to 0.5.
            disable_preproc_auto_orient (bool, optional): If true, the auto orient preprocessing step is disabled for this call. Default is False.
            disable_preproc_contrast (bool, optional): If true, the auto contrast preprocessing step is disabled for this call. Default is False.
            disable_preproc_grayscale (bool, optional): If true, the grayscale preprocessing step is disabled for this call. Default is False.
            disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.
            **kwargs: Additional parameters to customize the inference process.

        Returns:
            Union[List[List[List[float]]], Tuple[List[List[List[float]]], List[Tuple[int, int]]]]: The list of predictions, with each prediction being a list of lists. Optionally, also returns the dimensions of the processed images.

        Raises:
            InvalidMaskDecodeArgument: If an invalid `mask_decode_mode` is provided or if the `tradeoff_factor` is outside the allowed range.

        Notes:
            - Processes input images and normalizes them.
            - Makes predictions using the ONNX runtime.
            - Applies non-maximum suppression to the predictions.
            - Decodes the masks according to the specified mode.
        """
        return super().infer(
            image,
            class_agnostic_nms=class_agnostic_nms,
            confidence=confidence,
            disable_preproc_auto_orient=disable_preproc_auto_orient,
            disable_preproc_contrast=disable_preproc_contrast,
            disable_preproc_grayscale=disable_preproc_grayscale,
            disable_preproc_static_crop=disable_preproc_static_crop,
            iou_threshold=iou_threshold,
            mask_decode_mode=mask_decode_mode,
            max_candidates=max_candidates,
            max_detections=max_detections,
            return_image_dims=return_image_dims,
            tradeoff_factor=tradeoff_factor,
        )

    def postprocess(
        self,
        predictions: Tuple[np.ndarray, np.ndarray],
        preprocess_return_metadata: PreprocessReturnMetadata,
        **kwargs,
    ) -> Union[
        InstanceSegmentationInferenceResponse,
        List[InstanceSegmentationInferenceResponse],
    ]:
        predictions, protos = predictions
        predictions = w_np_non_max_suppression(
            predictions,
            conf_thresh=kwargs["confidence"],
            iou_thresh=kwargs["iou_threshold"],
            class_agnostic=kwargs["class_agnostic_nms"],
            max_detections=kwargs["max_detections"],
            max_candidate_detections=kwargs["max_candidates"],
            num_masks=self.num_masks,
        )
        infer_shape = (self.img_size_h, self.img_size_w)
        masks = []
        mask_decode_mode = kwargs["mask_decode_mode"]
        tradeoff_factor = kwargs["tradeoff_factor"]
        img_in_shape = preprocess_return_metadata["im_shape"]

        predictions = [np.array(p) for p in predictions]

        for pred, proto, img_dim in zip(
            predictions, protos, preprocess_return_metadata["img_dims"]
        ):
            if pred.size == 0:
                masks.append([])
                continue
            if mask_decode_mode == "accurate":
                batch_masks = process_mask_accurate(
                    proto, pred[:, 7:], pred[:, :4], img_in_shape[2:]
                )
                output_mask_shape = img_in_shape[2:]
            elif mask_decode_mode == "tradeoff":
                if not 0 <= tradeoff_factor <= 1:
                    raise InvalidMaskDecodeArgument(
                        f"Invalid tradeoff_factor: {tradeoff_factor}. Must be in [0.0, 1.0]"
                    )
                batch_masks = process_mask_tradeoff(
                    proto,
                    pred[:, 7:],
                    pred[:, :4],
                    img_in_shape[2:],
                    tradeoff_factor,
                )
                output_mask_shape = batch_masks.shape[1:]
            elif mask_decode_mode == "fast":
                batch_masks = process_mask_fast(
                    proto, pred[:, 7:], pred[:, :4], img_in_shape[2:]
                )
                output_mask_shape = batch_masks.shape[1:]
            else:
                raise InvalidMaskDecodeArgument(
                    f"Invalid mask_decode_mode: {mask_decode_mode}. Must be one of ['accurate', 'fast', 'tradeoff']"
                )
            polys = masks2poly(batch_masks)
            pred[:, :4] = post_process_bboxes(
                [pred[:, :4]],
                infer_shape,
                [img_dim],
                self.preproc,
                resize_method=self.resize_method,
                disable_preproc_static_crop=preprocess_return_metadata[
                    "disable_preproc_static_crop"
                ],
            )[0]
            polys = post_process_polygons(
                img_dim,
                polys,
                output_mask_shape,
                self.preproc,
                resize_method=self.resize_method,
            )
            masks.append(polys)
        return self.make_response(
            predictions, masks, preprocess_return_metadata["img_dims"], **kwargs
        )

    def preprocess(
        self, image: Any, **kwargs
    ) -> Tuple[np.ndarray, PreprocessReturnMetadata]:
        img_in, img_dims = self.load_image(
            image,
            disable_preproc_auto_orient=kwargs.get("disable_preproc_auto_orient"),
            disable_preproc_contrast=kwargs.get("disable_preproc_contrast"),
            disable_preproc_grayscale=kwargs.get("disable_preproc_grayscale"),
            disable_preproc_static_crop=kwargs.get("disable_preproc_static_crop"),
        )

        img_in /= 255.0
        return img_in, PreprocessReturnMetadata(
            {
                "img_dims": img_dims,
                "im_shape": img_in.shape,
                "disable_preproc_static_crop": kwargs.get(
                    "disable_preproc_static_crop"
                ),
            }
        )

    def make_response(
        self,
        predictions: List[List[List[float]]],
        masks: List[List[List[float]]],
        img_dims: List[Tuple[int, int]],
        class_filter: List[str] = [],
        **kwargs,
    ) -> Union[
        InstanceSegmentationInferenceResponse,
        List[InstanceSegmentationInferenceResponse],
    ]:
        """
        Create instance segmentation inference response objects for the provided predictions and masks.

        Args:
            predictions (List[List[List[float]]]): List of prediction data, one for each image.
            masks (List[List[List[float]]]): List of masks corresponding to the predictions.
            img_dims (List[Tuple[int, int]]): List of image dimensions corresponding to the processed images.
            class_filter (List[str], optional): List of class names to filter predictions by. Defaults to an empty list (no filtering).

        Returns:
            Union[InstanceSegmentationInferenceResponse, List[InstanceSegmentationInferenceResponse]]: A single instance segmentation response or a list of instance segmentation responses based on the number of processed images.

        Notes:
            - For each image, constructs an `InstanceSegmentationInferenceResponse` object.
            - Each response contains a list of `InstanceSegmentationPrediction` objects.
        """
        responses = []
        for ind, (batch_predictions, batch_masks) in enumerate(zip(predictions, masks)):
            predictions = []
            for pred, mask in zip(batch_predictions, batch_masks):
                if class_filter and self.class_names[int(pred[6])] not in class_filter:
                    # TODO: logger.debug
                    continue
                # Passing args as a dictionary here since one of the args is 'class' (a reserved keyword in Python)
                predictions.append(
                    InstanceSegmentationPrediction(
                        **{
                            "x": pred[0] + (pred[2] - pred[0]) / 2,
                            "y": pred[1] + (pred[3] - pred[1]) / 2,
                            "width": pred[2] - pred[0],
                            "height": pred[3] - pred[1],
                            "points": [Point(x=point[0], y=point[1]) for point in mask],
                            "confidence": pred[4],
                            "class": self.class_names[int(pred[6])],
                            "class_id": int(pred[6]),
                        }
                    )
                )
            response = InstanceSegmentationInferenceResponse(
                predictions=predictions,
                image=InferenceResponseImage(
                    width=img_dims[ind][1], height=img_dims[ind][0]
                ),
            )
            responses.append(response)
        return responses

    def predict(self, img_in: np.ndarray, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
        """Runs inference on the ONNX model.

        Args:
            img_in (np.ndarray): The preprocessed image(s) to run inference on.

        Returns:
            Tuple[np.ndarray, np.ndarray]: The ONNX model predictions and the ONNX model protos.

        Raises:
            NotImplementedError: This method must be implemented by a subclass.
        """
        raise NotImplementedError("predict must be implemented by a subclass")

    def validate_model_classes(self) -> None:
        output_shape = self.get_model_output_shape()
        num_classes = get_num_classes_from_model_prediction_shape(
            output_shape[2], masks=self.num_masks
        )
        try:
            assert num_classes == self.num_classes
        except AssertionError:
            raise ValueError(
                f"Number of classes in model ({num_classes}) does not match the number of classes in the environment ({self.num_classes})"
            )

infer(image, class_agnostic_nms=False, confidence=DEFAULT_CONFIDENCE, disable_preproc_auto_orient=False, disable_preproc_contrast=False, disable_preproc_grayscale=False, disable_preproc_static_crop=False, iou_threshold=DEFAULT_IOU_THRESH, mask_decode_mode=DEFAULT_MASK_DECODE_MODE, max_candidates=DEFAULT_MAX_CANDIDATES, max_detections=DEFAUlT_MAX_DETECTIONS, return_image_dims=False, tradeoff_factor=DEFAULT_TRADEOFF_FACTOR, **kwargs)

Process an image or list of images for instance segmentation.

Parameters:

    image (Any): An image or a list of images for processing. Can be a BGR numpy array, filepath, InferenceRequestImage, PIL Image, byte string, etc. Required.
    class_agnostic_nms (bool): Whether to use class-agnostic non-maximum suppression. Default: False.
    confidence (float): Confidence threshold for predictions. Default: DEFAULT_CONFIDENCE (0.5).
    iou_threshold (float): IoU threshold for non-maximum suppression. Default: DEFAULT_IOU_THRESH (0.5).
    mask_decode_mode (str): Decoding mode for masks. Choices are "accurate", "tradeoff", and "fast"; see the sketch after the notes below. Default: DEFAULT_MASK_DECODE_MODE ("accurate").
    max_candidates (int): Maximum number of candidate detections. Default: DEFAULT_MAX_CANDIDATES (3000).
    max_detections (int): Maximum number of detections after non-maximum suppression. Default: DEFAUlT_MAX_DETECTIONS (300).
    return_image_dims (bool): Whether to return the dimensions of the processed images. Default: False.
    tradeoff_factor (float): Tradeoff factor used when mask_decode_mode is set to "tradeoff". Must be in [0.0, 1.0]. Default: DEFAULT_TRADEOFF_FACTOR (0.5).
    disable_preproc_auto_orient (bool): If True, the auto-orient preprocessing step is disabled for this call. Default: False.
    disable_preproc_contrast (bool): If True, the contrast preprocessing step is disabled for this call. Default: False.
    disable_preproc_grayscale (bool): If True, the grayscale preprocessing step is disabled for this call. Default: False.
    disable_preproc_static_crop (bool): If True, the static crop preprocessing step is disabled for this call. Default: False.
    **kwargs: Additional parameters to customize the inference process.

Returns:

    Union[PREDICTIONS_TYPE, Tuple[PREDICTIONS_TYPE, List[Tuple[int, int]]]]: The list of predictions, with each prediction being a list of lists. Optionally, also returns the dimensions of the processed images.

Raises:

    InvalidMaskDecodeArgument: If an invalid mask_decode_mode is provided or if the tradeoff_factor is outside the allowed range.

Notes
  • Processes input images and normalizes them.
  • Makes predictions using the ONNX runtime.
  • Applies non-maximum suppression to the predictions.
  • Decodes the masks according to the specified mode.
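
The three decode modes trade mask fidelity for speed: judging from the postprocessing source, "accurate" upsamples masks to the full network input resolution, "fast" keeps the lower prototype resolution, and "tradeoff" scales between the two according to tradeoff_factor. A minimal sketch of selecting a mode; `model` is a hypothetical handle to a concrete subclass, as in the loader example above.

# Sketch: selecting a mask decode mode. Assumption: lower tradeoff_factor
# values lean toward "fast"-style decoding and higher values toward
# "accurate"; this is inferred from the [0.0, 1.0] range, not documented.
predictions = model.infer(
    "path/to/image.jpg",
    mask_decode_mode="tradeoff",
    tradeoff_factor=0.5,
)
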
Source code in inference/core/models/instance_segmentation_base.py
def infer(
    self,
    image: Any,
    class_agnostic_nms: bool = False,
    confidence: float = DEFAULT_CONFIDENCE,
    disable_preproc_auto_orient: bool = False,
    disable_preproc_contrast: bool = False,
    disable_preproc_grayscale: bool = False,
    disable_preproc_static_crop: bool = False,
    iou_threshold: float = DEFAULT_IOU_THRESH,
    mask_decode_mode: str = DEFAULT_MASK_DECODE_MODE,
    max_candidates: int = DEFAULT_MAX_CANDIDATES,
    max_detections: int = DEFAUlT_MAX_DETECTIONS,
    return_image_dims: bool = False,
    tradeoff_factor: float = DEFAULT_TRADEOFF_FACTOR,
    **kwargs,
) -> Union[PREDICTIONS_TYPE, Tuple[PREDICTIONS_TYPE, List[Tuple[int, int]]]]:
    """
    Process an image or list of images for instance segmentation.

    Args:
        image (Any): An image or a list of images for processing.
            - can be a BGR numpy array, filepath, InferenceRequestImage, PIL Image, byte-string, etc.
        class_agnostic_nms (bool, optional): Whether to use class-agnostic non-maximum suppression. Defaults to False.
        confidence (float, optional): Confidence threshold for predictions. Defaults to 0.5.
        iou_threshold (float, optional): IoU threshold for non-maximum suppression. Defaults to 0.5.
        mask_decode_mode (str, optional): Decoding mode for masks. Choices are "accurate", "tradeoff", and "fast". Defaults to "accurate".
        max_candidates (int, optional): Maximum number of candidate detections. Defaults to 3000.
        max_detections (int, optional): Maximum number of detections after non-maximum suppression. Defaults to 300.
        return_image_dims (bool, optional): Whether to return the dimensions of the processed images. Defaults to False.
        tradeoff_factor (float, optional): Tradeoff factor used when `mask_decode_mode` is set to "tradeoff". Must be in [0.0, 1.0]. Defaults to 0.5.
        disable_preproc_auto_orient (bool, optional): If true, the auto orient preprocessing step is disabled for this call. Default is False.
        disable_preproc_contrast (bool, optional): If true, the auto contrast preprocessing step is disabled for this call. Default is False.
        disable_preproc_grayscale (bool, optional): If true, the grayscale preprocessing step is disabled for this call. Default is False.
        disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.
        **kwargs: Additional parameters to customize the inference process.

    Returns:
        Union[List[List[List[float]]], Tuple[List[List[List[float]]], List[Tuple[int, int]]]]: The list of predictions, with each prediction being a list of lists. Optionally, also returns the dimensions of the processed images.

    Raises:
        InvalidMaskDecodeArgument: If an invalid `mask_decode_mode` is provided or if the `tradeoff_factor` is outside the allowed range.

    Notes:
        - Processes input images and normalizes them.
        - Makes predictions using the ONNX runtime.
        - Applies non-maximum suppression to the predictions.
        - Decodes the masks according to the specified mode.
    """
    return super().infer(
        image,
        class_agnostic_nms=class_agnostic_nms,
        confidence=confidence,
        disable_preproc_auto_orient=disable_preproc_auto_orient,
        disable_preproc_contrast=disable_preproc_contrast,
        disable_preproc_grayscale=disable_preproc_grayscale,
        disable_preproc_static_crop=disable_preproc_static_crop,
        iou_threshold=iou_threshold,
        mask_decode_mode=mask_decode_mode,
        max_candidates=max_candidates,
        max_detections=max_detections,
        return_image_dims=return_image_dims,
        tradeoff_factor=tradeoff_factor,
    )

make_response(predictions, masks, img_dims, class_filter=[], **kwargs)

Create instance segmentation inference response objects for the provided predictions and masks.

Parameters:

    predictions (List[List[List[float]]]): List of prediction data, one for each image. Required.
    masks (List[List[List[float]]]): List of masks corresponding to the predictions. Required.
    img_dims (List[Tuple[int, int]]): List of image dimensions corresponding to the processed images. Required.
    class_filter (List[str]): List of class names to filter predictions by. Defaults to an empty list (no filtering).

Returns:

    Union[InstanceSegmentationInferenceResponse, List[InstanceSegmentationInferenceResponse]]: A single instance segmentation response or a list of instance segmentation responses based on the number of processed images.

Notes
  • For each image, constructs an InstanceSegmentationInferenceResponse object.
  • Each response contains a list of InstanceSegmentationPrediction objects.
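
The fields on those objects follow the construction in the source below. A short sketch of consuming the returned list; the responses variable is illustrative.

# Sketch: iterating over the responses produced by make_response. Attribute
# names mirror the fields passed to InstanceSegmentationPrediction below.
for response in responses:
    print(f"image: {response.image.width}x{response.image.height}")
    for prediction in response.predictions:
        print(
            f"center=({prediction.x}, {prediction.y}) "
            f"size={prediction.width}x{prediction.height} "
            f"confidence={prediction.confidence:.2f} "
            f"points={len(prediction.points)}"
        )
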
Source code in inference/core/models/instance_segmentation_base.py
def make_response(
    self,
    predictions: List[List[List[float]]],
    masks: List[List[List[float]]],
    img_dims: List[Tuple[int, int]],
    class_filter: List[str] = [],
    **kwargs,
) -> Union[
    InstanceSegmentationInferenceResponse,
    List[InstanceSegmentationInferenceResponse],
]:
    """
    Create instance segmentation inference response objects for the provided predictions and masks.

    Args:
        predictions (List[List[List[float]]]): List of prediction data, one for each image.
        masks (List[List[List[float]]]): List of masks corresponding to the predictions.
        img_dims (List[Tuple[int, int]]): List of image dimensions corresponding to the processed images.
        class_filter (List[str], optional): List of class names to filter predictions by. Defaults to an empty list (no filtering).

    Returns:
        Union[InstanceSegmentationInferenceResponse, List[InstanceSegmentationInferenceResponse]]: A single instance segmentation response or a list of instance segmentation responses based on the number of processed images.

    Notes:
        - For each image, constructs an `InstanceSegmentationInferenceResponse` object.
        - Each response contains a list of `InstanceSegmentationPrediction` objects.
    """
    responses = []
    for ind, (batch_predictions, batch_masks) in enumerate(zip(predictions, masks)):
        predictions = []
        for pred, mask in zip(batch_predictions, batch_masks):
            if class_filter and self.class_names[int(pred[6])] not in class_filter:
                # TODO: logger.debug
                continue
            # Passing args as a dictionary here since one of the args is 'class' (a reserved keyword in Python)
            predictions.append(
                InstanceSegmentationPrediction(
                    **{
                        "x": pred[0] + (pred[2] - pred[0]) / 2,
                        "y": pred[1] + (pred[3] - pred[1]) / 2,
                        "width": pred[2] - pred[0],
                        "height": pred[3] - pred[1],
                        "points": [Point(x=point[0], y=point[1]) for point in mask],
                        "confidence": pred[4],
                        "class": self.class_names[int(pred[6])],
                        "class_id": int(pred[6]),
                    }
                )
            )
        response = InstanceSegmentationInferenceResponse(
            predictions=predictions,
            image=InferenceResponseImage(
                width=img_dims[ind][1], height=img_dims[ind][0]
            ),
        )
        responses.append(response)
    return responses

predict(img_in, **kwargs)

Runs inference on the ONNX model.

Parameters:

    img_in (np.ndarray): The preprocessed image(s) to run inference on. Required.

Returns:

    Tuple[np.ndarray, np.ndarray]: The ONNX model predictions and the ONNX model protos.

Raises:

    NotImplementedError: This method must be implemented by a subclass.
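
A concrete subclass therefore just forwards the preprocessed tensor to its ONNX session and returns the two output arrays. A minimal sketch, assuming the base class exposes an onnxruntime session as self.onnx_session and the graph input name as self.input_name; both attribute names are assumptions about the concrete model, not guarantees of this base class.

# Hypothetical subclass sketch. self.onnx_session and self.input_name are
# assumed attribute names; the exported ONNX graph is expected to emit
# (predictions, protos) as its two outputs.
class MySegmentationOnnxModel(InstanceSegmentationBaseOnnxRoboflowInferenceModel):
    def predict(self, img_in: np.ndarray, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
        predictions, protos = self.onnx_session.run(
            None, {self.input_name: img_in}
        )
        return predictions, protos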

Source code in inference/core/models/instance_segmentation_base.py
def predict(self, img_in: np.ndarray, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
    """Runs inference on the ONNX model.

    Args:
        img_in (np.ndarray): The preprocessed image(s) to run inference on.

    Returns:
        Tuple[np.ndarray, np.ndarray]: The ONNX model predictions and the ONNX model protos.

    Raises:
        NotImplementedError: This method must be implemented by a subclass.
    """
    raise NotImplementedError("predict must be implemented by a subclass")