preprocess

letterbox_image(image, desired_size, color=(0, 0, 0))

Resize and pad image to fit the desired size, preserving its aspect ratio.

Parameters:

- image: numpy array representing the image.
- desired_size: tuple (width, height) representing the target dimensions.
- color: tuple (B, G, R) representing the color to pad with.

Returns:

- The letterboxed image.

Source code in inference/core/utils/preprocess.py
def letterbox_image(
    image: np.ndarray,
    desired_size: Tuple[int, int],
    color: Tuple[int, int, int] = (0, 0, 0),
) -> np.ndarray:
    """
    Resize and pad image to fit the desired size, preserving its aspect ratio.

    Parameters:
    - image: numpy array representing the image.
    - desired_size: tuple (width, height) representing the target dimensions.
    - color: tuple (B, G, R) representing the color to pad with.

    Returns:
    - letterboxed image.
    """
    resized_img = resize_image_keeping_aspect_ratio(
        image=image,
        desired_size=desired_size,
    )
    new_height, new_width = resized_img.shape[:2]
    top_padding = (desired_size[1] - new_height) // 2
    bottom_padding = desired_size[1] - new_height - top_padding
    left_padding = (desired_size[0] - new_width) // 2
    right_padding = desired_size[0] - new_width - left_padding
    return cv2.copyMakeBorder(
        resized_img,
        top_padding,
        bottom_padding,
        left_padding,
        right_padding,
        cv2.BORDER_CONSTANT,
        value=color,
    )
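
A minimal usage sketch (an illustration, not part of the library's documentation): it assumes NumPy and OpenCV are installed and that letterbox_image is importable from inference.core.utils.preprocess; the 1280x720 frame is just an example input.

import numpy as np

from inference.core.utils.preprocess import letterbox_image

# Illustrative 720p BGR frame.
frame = np.zeros((720, 1280, 3), dtype=np.uint8)

# Fit the frame into a 640x640 square, padding with gray (B, G, R) = (114, 114, 114).
boxed = letterbox_image(frame, desired_size=(640, 640), color=(114, 114, 114))

# The content is resized to 640x360, then 140 rows of padding are added above and below.
print(boxed.shape)  # (640, 640, 3)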

prepare(image, preproc, disable_preproc_contrast=False, disable_preproc_grayscale=False, disable_preproc_static_crop=False)

Prepares an image by applying a series of preprocessing steps defined in the preproc dictionary.

Parameters:

- image (ndarray): The input image as a numpy array. Required.
- preproc (dict): Dictionary containing preprocessing steps. Example: {"resize": {"enabled": true, "width": 416, "height": 416, "format": "Stretch to"}, "static-crop": {"y_min": 25, "x_max": 75, "y_max": 75, "enabled": true, "x_min": 25}, "auto-orient": {"enabled": true}, "grayscale": {"enabled": true}, "contrast": {"enabled": true, "type": "Adaptive Equalization"}}. Required.
- disable_preproc_contrast (bool): If true, the contrast preprocessing step is disabled for this call. Default: False.
- disable_preproc_grayscale (bool): If true, the grayscale preprocessing step is disabled for this call. Default: False.
- disable_preproc_static_crop (bool): If true, the static crop preprocessing step is disabled for this call. Default: False.

Returns:

- ndarray: The preprocessed image.
- Tuple[int, int]: The original image dimensions as (height, width).

Note

The function uses global flags like DISABLE_PREPROC_AUTO_ORIENT, DISABLE_PREPROC_STATIC_CROP, etc. to conditionally enable or disable certain preprocessing steps.

Source code in inference/core/utils/preprocess.py
def prepare(
    image: np.ndarray,
    preproc,
    disable_preproc_contrast: bool = False,
    disable_preproc_grayscale: bool = False,
    disable_preproc_static_crop: bool = False,
) -> Tuple[np.ndarray, Tuple[int, int]]:
    """
    Prepares an image by applying a series of preprocessing steps defined in the `preproc` dictionary.

    Args:
        image (np.ndarray): The input image as a numpy array.
        preproc (dict): Dictionary containing preprocessing steps. Example:
            {
                "resize": {"enabled": true, "width": 416, "height": 416, "format": "Stretch to"},
                "static-crop": {"y_min": 25, "x_max": 75, "y_max": 75, "enabled": true, "x_min": 25},
                "auto-orient": {"enabled": true},
                "grayscale": {"enabled": true},
                "contrast": {"enabled": true, "type": "Adaptive Equalization"}
            }
        disable_preproc_contrast (bool, optional): If true, the contrast preprocessing step is disabled for this call. Default is False.
        disable_preproc_grayscale (bool, optional): If true, the grayscale preprocessing step is disabled for this call. Default is False.
        disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.

    Returns:
        np.ndarray: The preprocessed image.
        Tuple[int, int]: The original image dimensions as (height, width).

    Note:
        The function uses global flags like `DISABLE_PREPROC_AUTO_ORIENT`, `DISABLE_PREPROC_STATIC_CROP`, etc.
        to conditionally enable or disable certain preprocessing steps.
    """
    try:
        h, w = image.shape[0:2]
        img_dims = (h, w)
        if static_crop_should_be_applied(
            preprocessing_config=preproc,
            disable_preproc_static_crop=disable_preproc_static_crop,
        ):
            image = take_static_crop(
                image=image, crop_parameters=preproc[STATIC_CROP_KEY]
            )
        if contrast_adjustments_should_be_applied(
            preprocessing_config=preproc,
            disable_preproc_contrast=disable_preproc_contrast,
        ):
            adjustment_type = ContrastAdjustmentType(preproc[CONTRAST_KEY][TYPE_KEY])
            image = apply_contrast_adjustment(
                image=image, adjustment_type=adjustment_type
            )
        if grayscale_conversion_should_be_applied(
            preprocessing_config=preproc,
            disable_preproc_grayscale=disable_preproc_grayscale,
        ):
            image = apply_grayscale_conversion(image=image)
        return image, img_dims
    except KeyError as error:
        raise PreProcessingError(
            f"Pre-processing of image failed due to misconfiguration. Missing key: {error}."
        ) from error
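
A hedged usage sketch: the configuration below reuses the illustrative values from the docstring example (only static crop enabled, contrast and grayscale switched off), and it assumes prepare is importable from inference.core.utils.preprocess; the exact set of keys each helper expects is defined elsewhere in the module.

import numpy as np

from inference.core.utils.preprocess import prepare

image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # illustrative frame

preproc = {
    "static-crop": {"enabled": True, "x_min": 25, "y_min": 25, "x_max": 75, "y_max": 75},
    "contrast": {"enabled": False},
    "grayscale": {"enabled": False},
}

preprocessed, img_dims = prepare(image, preproc)

# img_dims holds the original (height, width); the crop keeps the central region of the frame.
print(img_dims)  # (480, 640)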

resize_image_keeping_aspect_ratio(image, desired_size)

Resize an image, preserving its aspect ratio.

Parameters:

- image: numpy array representing the image.
- desired_size: tuple (width, height) representing the target dimensions.

Source code in inference/core/utils/preprocess.py
def resize_image_keeping_aspect_ratio(
    image: np.ndarray,
    desired_size: Tuple[int, int],
) -> np.ndarray:
    """
    Resize an image, preserving its aspect ratio.

    Parameters:
    - image: numpy array representing the image.
    - desired_size: tuple (width, height) representing the target dimensions.
    """
    img_ratio = image.shape[1] / image.shape[0]
    desired_ratio = desired_size[0] / desired_size[1]

    # Determine the new dimensions
    if img_ratio >= desired_ratio:
        # Resize by width
        new_width = desired_size[0]
        new_height = int(desired_size[0] / img_ratio)
    else:
        # Resize by height
        new_height = desired_size[1]
        new_width = int(desired_size[1] * img_ratio)

    # Resize the image to new dimensions
    return cv2.resize(image, (new_width, new_height))
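
A short sketch of the sizing logic (assuming the function is importable from inference.core.utils.preprocess): a 1920x1080 image is wider than a 640x640 target, so it is resized by width and the height follows from the aspect ratio.

import numpy as np

from inference.core.utils.preprocess import resize_image_keeping_aspect_ratio

image = np.zeros((1080, 1920, 3), dtype=np.uint8)  # (height, width, channels)

resized = resize_image_keeping_aspect_ratio(image, desired_size=(640, 640))

# img_ratio = 1920 / 1080 (about 1.78) >= desired_ratio = 1.0, so the width is clamped
# to 640 and the height becomes int(640 * 1080 / 1920) = 360.
print(resized.shape)  # (360, 640, 3)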