ToImage in PyTorch

Buy Me a Coffee☕ *My post explains OxfordIIITPet(). ToImage() can convert a PIL(Pillow library) Image, tensor or ndarray to an Image as shown below: *Memos: The 1st argument is img(Required-Type:PIL Image or tensor/ndarray(int/float/complex/bool)): *Memos: A tensor must be 2D or more D. A ndarray must be 0D to 3D. Don't use img=. v2 is recommended to use according to V1 or V2? Which one should I use?. from torchvision.datasets import OxfordIIITPet from torchvision.transforms.v2 import ToImage ToImage() # ToImage() origin_data = OxfordIIITPet( # It's PIL Image. root="data", transform=None ) Image_data = OxfordIIITPet( root="data", transform=ToImage() ) Image_data # Dataset OxfordIIITPet # Number of datapoints: 3680 # Root location: data # StandardTransform # Transform: ToImage() Image_data[0] # (Image([[[37, 35, 36, ..., 247, 249, 249], # [35, 35, 37, ..., 246, 248, 249], # ..., # [28, 28, 27, ..., 59, 65, 76]], # [[20, 18, 19, ..., 248, 248, 248], # [18, 18, 20, ..., 247, 247, 248], # ..., # [27, 27, 27, ..., 94, 106, 117]], # [[12, 10, 11, ..., 253, 253, 253], # [10, 10, 12, ..., 251, 252, 253], # ..., # [35, 35, 35, ..., 214, 232, 223]]], dtype=torch.uint8,), 0) Image_data[0][0].size() # torch.Size([3, 500, 394]) Image_data[0][0] # Image([[[37, 35, 36, ..., 247, 249, 249], # [35, 35, 37, ..., 246, 248, 249], # ..., # [28, 28, 27, ..., 59, 65, 76]], # [[20, 18, 19, ..., 248, 248, 248], # [18, 18, 20, ..., 247, 247, 248], # ..., # [27, 27, 27, ..., 94, 106, 117]], # [[12, 10, 11, ..., 253, 253, 253], # [10, 10, 12, ..., 251, 252, 253], # ..., # [35, 35, 35, ..., 214, 232, 223]]], dtype=torch.uint8,) Image_data[0][1] # 0 import matplotlib.pyplot as plt plt.imshow(X=Image_data[0][0]) # TypeError: Invalid shape (3, 500, 394) for image data ti = ToImage() ti(origin_data[0][0]) # Image([[[37, 35, 36, ..., 247, 249, 249], # [35, 35, 37, ..., 246, 248, 249], # ..., # [28, 28, 27, ..., 59, 65, 76]], # [[20, 18, 19, ..., 248, 248, 248], # [18, 18, 20, ..., 247, 247, 248], # 
..., # [27, 27, 27, ..., 94, 106, 117]], # [[12, 10, 11, ..., 253, 253, 253], # [10, 10, 12, ..., 251, 252, 253], # ..., # [35, 35, 35, ..., 214, 232, 223]]], dtype=torch.uint8,) plt.imshow(ti(origin_data[0][0])) # TypeError: Invalid shape (3, 500, 394) for image data ti(torch.tensor([[0, 1, 2]])) # Image([[[0, 1, 2]]],) ti(np.array([[0, 1, 2]])) # Image([[[0, 1, 2]]], dtype=torch.int32,)

Apr 20, 2025 - 18:57
 0
ToImage in PyTorch

Buy Me a Coffee

*My post explains OxfordIIITPet().

ToImage() can convert a PIL (Pillow library) Image, tensor or ndarray to an Image as shown below:
*Memos:

  • The 1st argument is img (Required-Type: PIL Image or tensor/ndarray (int/float/complex/bool)): *Memos:
    • A tensor must be 2D or more D.
    • An ndarray must be 0D to 3D.
    • Don't use img=.
  • v2 is recommended according to V1 or V2? Which one should I use?
# Demo: torchvision.transforms.v2.ToImage converts a PIL Image, tensor or
# ndarray into an Image tensor (a torch.Tensor subclass, uint8 CHW for PIL input).
import matplotlib.pyplot as plt
import numpy as np  # was missing: np.array() is used below
import torch        # was missing: torch.tensor() is used below

from torchvision.datasets import OxfordIIITPet
from torchvision.transforms.v2 import ToImage

ToImage()
# ToImage()

# Dataset whose samples are left as raw PIL Images (no transform applied).
origin_data = OxfordIIITPet( # It's PIL Image. 
    root="data",
    transform=None
)

# Same dataset, but every sample's image is converted to an Image tensor.
Image_data = OxfordIIITPet(
    root="data",
    transform=ToImage()
)

Image_data
# Dataset OxfordIIITPet
#     Number of datapoints: 3680
#     Root location: data
#     StandardTransform
# Transform: ToImage()

# Each sample is an (image, label) tuple.
Image_data[0]
# (Image([[[37, 35, 36, ..., 247, 249, 249],
#          [35, 35, 37, ..., 246, 248, 249],
#          ...,
#          [28, 28, 27, ..., 59, 65, 76]],
#         [[20, 18, 19, ..., 248, 248, 248],
#          [18, 18, 20, ..., 247, 247, 248],
#          ...,
#          [27, 27, 27, ..., 94, 106, 117]], 
#         [[12, 10, 11, ..., 253, 253, 253],
#          [10, 10, 12, ..., 251, 252, 253],
#          ...,
#          [35, 35, 35, ..., 214, 232, 223]]], dtype=torch.uint8,), 0)

# The converted image is channels-first: [C, H, W].
Image_data[0][0].size()
# torch.Size([3, 500, 394])

Image_data[0][0]
# Image([[[37, 35, 36, ..., 247, 249, 249],
#         [35, 35, 37, ..., 246, 248, 249],
#         ...,
#         [28, 28, 27, ...,  59, 65, 76]],
#        [[20, 18, 19, ..., 248, 248, 248],
#         [18, 18, 20, ..., 247, 247, 248],
#         ...,
#         [27, 27, 27, ...,  94, 106, 117]],
#        [[12, 10, 11, ..., 253, 253, 253],
#         [10, 10, 12, ..., 251, 252, 253],
#         ...,
#         [35, 35, 35,  ..., 214, 232, 223]]], dtype=torch.uint8,)

# Second tuple element is the class label.
Image_data[0][1]
# 0

# matplotlib expects HWC data, but ToImage yields CHW — hence this error.
# (Use Image_data[0][0].permute(1, 2, 0) if you actually want to display it.)
plt.imshow(X=Image_data[0][0])
# TypeError: Invalid shape (3, 500, 394) for image data

# The transform can also be applied manually, outside a dataset.
ti = ToImage()

ti(origin_data[0][0])
# Image([[[37, 35, 36, ..., 247, 249, 249],
#         [35, 35, 37, ..., 246, 248, 249],
#         ...,
#         [28, 28, 27, ...,  59, 65, 76]],
#        [[20, 18, 19, ..., 248, 248, 248],
#         [18, 18, 20, ..., 247, 247, 248],
#         ...,
#         [27, 27, 27, ...,  94, 106, 117]],
#        [[12, 10, 11, ..., 253, 253, 253],
#         [10, 10, 12, ..., 251, 252, 253],
#         ...,
#         [35, 35, 35,  ..., 214, 232, 223]]], dtype=torch.uint8,)

plt.imshow(ti(origin_data[0][0]))
# TypeError: Invalid shape (3, 500, 394) for image data

# A 2D tensor gains a leading channel dimension.
ti(torch.tensor([[0, 1, 2]]))
# Image([[[0, 1, 2]]],)

# Same for a 2D ndarray (converted to a torch dtype).
ti(np.array([[0, 1, 2]]))
# Image([[[0, 1, 2]]], dtype=torch.int32,)