# Copyright 2023 Bingxin Ke, ETH Zurich. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# If you find this code useful, we kindly ask you to cite our paper in your work.
# Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
# More information about the method can be found at https://marigoldmonodepth.github.io
# --------------------------------------------------------------------------


import matplotlib as mpl
import numpy as np
import torch
from PIL import Image


def colorize_depth_maps(
    depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None
):
    """
    Colorize depth maps.

    Args:
        depth_map (`torch.Tensor` or `np.ndarray`):
            Depth values of shape [H, W] or [B, H, W].
        min_depth (`float`):
            Depth value mapped to the low end of the colormap.
        max_depth (`float`):
            Depth value mapped to the high end of the colormap.
        cmap (`str`, *optional*, defaults to `"Spectral"`):
            Matplotlib colormap name.
        valid_mask (`torch.Tensor` or `np.ndarray`, *optional*):
            Boolean mask of valid pixels; invalid pixels are set to 0.

    Returns:
        Colored maps of shape [B, 3, H, W] with values in [0, 1], returned as a
        `torch.Tensor` if `depth_map` is a tensor, otherwise as a `np.ndarray`.
    """
    assert len(depth_map.shape) >= 2, "Invalid dimension"

    if isinstance(depth_map, torch.Tensor):
        depth = depth_map.detach().clone().squeeze().cpu().numpy()  # move to CPU before numpy conversion
    elif isinstance(depth_map, np.ndarray):
        depth = depth_map.copy().squeeze()
    # reshape to [ (B,) H, W ]
    if depth.ndim < 3:
        depth = depth[np.newaxis, :, :]

    # colorize: normalize to [0, 1], map through the colormap, drop the alpha channel
    cm = mpl.colormaps[cmap]
    depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
    img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3]  # value from 0 to 1
    img_colored_np = np.rollaxis(img_colored_np, 3, 1)  # [B, H, W, 3] -> [B, 3, H, W]

    if valid_mask is not None:
        if isinstance(valid_mask, torch.Tensor):
            valid_mask = valid_mask.detach().cpu().numpy()
        valid_mask = valid_mask.squeeze()  # [H, W] or [B, H, W]
        if valid_mask.ndim < 3:
            valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
        else:
            valid_mask = valid_mask[:, np.newaxis, :, :]
        valid_mask = np.repeat(valid_mask, 3, axis=1)
        img_colored_np[~valid_mask] = 0  # zero out invalid pixels

    if isinstance(depth_map, torch.Tensor):
        img_colored = torch.from_numpy(img_colored_np).float()
    elif isinstance(depth_map, np.ndarray):
        img_colored = img_colored_np

    return img_colored


def chw2hwc(chw):
    """Convert an image from [C, H, W] to [H, W, C] layout."""
    assert 3 == len(chw.shape)
    if isinstance(chw, torch.Tensor):
        hwc = torch.permute(chw, (1, 2, 0))
    elif isinstance(chw, np.ndarray):
        hwc = np.moveaxis(chw, 0, -1)
    return hwc


def resize_max_res(img: Image.Image, max_edge_resolution: int) -> Image.Image:
    """
    Resize image to limit maximum edge length while keeping aspect ratio.

    Args:
        img (`Image.Image`):
            Image to be resized.
        max_edge_resolution (`int`):
            Maximum edge length (pixel).

    Returns:
        `Image.Image`: Resized image.
    """
    original_width, original_height = img.size
    downscale_factor = min(
        max_edge_resolution / original_width, max_edge_resolution / original_height
    )

    new_width = int(original_width * downscale_factor)
    new_height = int(original_height * downscale_factor)

    resized_img = img.resize((new_width, new_height))
    return resized_img
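

# --------------------------------------------------------------------------
# Minimal usage sketch (not part of the upstream Marigold utilities): exercises
# the helpers above on a synthetic depth ramp. The demo data, the 512 px cap,
# and the output path "depth_colored.png" are illustrative assumptions only.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    # synthetic [H, W] depth map: a linear ramp from 0 to 10 (arbitrary units)
    demo_depth = np.linspace(0.0, 10.0, 480 * 640, dtype=np.float32).reshape(480, 640)
    # colorize to [1, 3, H, W] in [0, 1] and take the single batch item
    colored_chw = colorize_depth_maps(demo_depth, min_depth=0.0, max_depth=10.0, cmap="Spectral")[0]
    # convert [3, H, W] -> [H, W, 3] and scale to 8-bit for PIL
    colored_hwc = (chw2hwc(colored_chw) * 255).astype(np.uint8)
    image = Image.fromarray(colored_hwc)
    # cap the longer edge at 512 px while keeping the aspect ratio
    image = resize_max_res(image, max_edge_resolution=512)
    image.save("depth_colored.png")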