ToPILImage in PyTorch

*Memos:

  • My post explains how to convert and scale a PIL Image to an Image in PyTorch.
  • My post explains Compose().
  • My post explains ToImage().
  • My post explains ToDtype().
  • My post explains ToTensor().
  • My post explains PILToTensor().
  • My post explains OxfordIIITPet().

ToPILImage() can convert an Image([..., C, H, W]), tensor or ndarray to a PIL (Pillow library) Image([H, W, C]), and it doesn't scale the values to [0.0, 1.0], as shown below:
*Memos:

  • The 1st argument is img(Required-Type:PIL Image, Image or tensor/ndarray(int/float/complex/bool)): *Memos:
  • A tensor must be 2D or 3D.
  • An ndarray must be 2D or 3D.
  • Don't use img=. (These constraints are checked in the short sketch after this list.)
  • Using v2 is recommended according to V1 or V2? Which one should I use?.
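
A quick, hedged sketch of these constraints (the shapes and the uint8 dtype are only for illustration):

from torchvision.transforms.v2 import ToPILImage
import torch

tp = ToPILImage()
tp(torch.tensor([[[0, 1, 2]]], dtype=torch.uint8))    # 3D [C, H, W] tensor -> PIL Image
tp(torch.tensor([0, 1, 2], dtype=torch.uint8))        # error: a tensor must be 2D or 3D
tp(img=torch.tensor([[0, 1, 2]], dtype=torch.uint8))  # error: pass the input positionally, not as img=
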
from torchvision.datasets import OxfordIIITPet
from torchvision.transforms.v2 import ToPILImage, ToImage, PILToTensor
import torch
import numpy as np

ToPILImage()
# ToPILImage()

Image_data = OxfordIIITPet(
    root="data",
    transform=ToImage()
)

Tensor_data = OxfordIIITPet(
    root="data",
    transform=PILToTensor()
)
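
# Note: both datasets above assume the files are already under data/; OxfordIIITPet
# also accepts download=True to fetch them on the first run.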

tp = ToPILImage()

tp(Image_data[0])
tp(Tensor_data[0])
# (<PIL.Image.Image image mode=RGB size=... at 0x...>, 0)
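
# A usage sketch (not part of the original output): the converted sample can be
# unpacked and used as a regular PIL image.
img, label = Tensor_data[0]
pil_img = tp(img)
print(type(pil_img), pil_img.size, label)
# <class 'PIL.Image.Image'> (<width>, <height>) 0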

tp((torch.tensor([[0, 1, 2]]), 0)) # int64
tp((torch.tensor([[0, 1, 2]], dtype=torch.int64), 0))
tp(torch.tensor([[0, 1, 2]], dtype=torch.int64))
# TypeError: Input type int64 is not supported
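
# Workaround sketch (an assumption, not from the post): cast int64 to a supported
# dtype such as int32 before converting.
tp(torch.tensor([[0, 1, 2]], dtype=torch.int64).to(torch.int32))
# <PIL.Image.Image image mode=I size=3x1 at 0x...>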

tp((torch.tensor([[0, 1, 2]], dtype=torch.int32), 0))
tp((torch.tensor([[[0, 1, 2]]], dtype=torch.int32), 0))
# (<PIL.Image.Image image mode=I size=3x1 at 0x...>, 0)

print(tp(torch.tensor([[0, 1, 2]], dtype=torch.int32)))
print(tp(torch.tensor([[[0, 1, 2]]], dtype=torch.int32)))
# <PIL.Image.Image image mode=I size=3x1 at 0x...>
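
# The integer values are kept as-is (no scaling to [0.0, 1.0]), e.g.:
print(list(tp(torch.tensor([[0, 1, 2]], dtype=torch.int32)).getdata()))
# [0, 1, 2]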

tp((torch.tensor([[0., 1., 2.]]), 0)) # float32
tp((torch.tensor([[0., 1., 2.]], dtype=torch.float32), 0))
tp((torch.tensor([[0., 1., 2.]], dtype=torch.float64), 0))
# (<PIL.Image.Image image mode=L size=3x1 at 0x...>, 0)

print(tp(torch.tensor([[0., 1., 2.]], dtype=torch.float32)))
print(tp(torch.tensor([[0., 1., 2.]], dtype=torch.float64)))
# <PIL.Image.Image image mode=L size=3x1 at 0x...>
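
# Note (observed torchvision behavior; treat it as an assumption): float tensors are
# converted to 8-bit mode "L" internally rather than kept as floating point.
print(tp(torch.tensor([[0., 1., 2.]], dtype=torch.float32)).mode)
# L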

tp((torch.tensor([[0.+0.j, 1.+0.j, 2.+0.j]]), 0)) # complex64
tp((torch.tensor([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=torch.complex64), 0))
tp(torch.tensor([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=torch.complex64))
# TypeError: Input type complex64 is not supported

tp((torch.tensor([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=torch.complex32), 0))
tp(torch.tensor([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=torch.complex32))
# TypeError: Got unsupported ScalarType ComplexHalf
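
# Workaround sketch (an assumption): drop the imaginary part so the tensor becomes float.
tp(torch.tensor([[0.+0.j, 1.+0.j, 2.+0.j]]).real)
# <PIL.Image.Image image mode=L size=3x1 at 0x...>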

tp((np.array([[0, 1, 2]]), 0)) # int32
tp((np.array([[0, 1, 2]], dtype=np.int32), 0))
# (<PIL.Image.Image image mode=I size=3x1 at 0x...>, 0)

print(tp(np.array([[0, 1, 2]], dtype=np.int32)))
# <PIL.Image.Image image mode=I size=3x1 at 0x...>

tp((np.array([[0, 1, 2]], dtype=np.int64), 0))
tp(np.array([[0, 1, 2]], dtype=np.int64))
# TypeError: Input type int64 is not supported

tp((np.array([[0., 1., 2.]]), 0)) # float64
tp((np.array([[0., 1., 2.]], dtype=np.float64), 0))
tp((np.array([[0., 1., 2.]], dtype=np.float32), 0))
# (<PIL.Image.Image image mode=... size=... at 0x...>, 0)

print(tp(np.array([[0., 1., 2.]], dtype=np.float64)))
print(tp(np.array([[0., 1., 2.]], dtype=np.float32)))
# <PIL.Image.Image image mode=... size=... at 0x...>

tp((np.array([[0.+0.j, 1.+0.j, 2.+0.j]]), 0))
tp((np.array([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=np.complex128), 0))
tp(np.array([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=np.complex128))
# TypeError: Input type complex128 is not supported

tp((np.array([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=np.complex64), 0))
tp(np.array([[0.+0.j, 1.+0.j, 2.+0.j]], dtype=np.complex64))
# TypeError: Input type complex64 is not supported

tp((np.array([[True, False, True]]), 0)) # bool
tp((np.array([[True, False, True]], dtype=bool), 0))
tp(np.array([[True, False, True]], dtype=bool))
# TypeError: Input type bool is not supported
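
# Workaround sketch (an assumption): map the booleans to uint8 (0/255) first.
tp(np.array([[True, False, True]]).astype(np.uint8) * 255)
# <PIL.Image.Image image mode=L size=3x1 at 0x...>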