Conversion with PIL Image, PyTorch tensor & NumPy array

You can convert between a PIL Image, a PyTorch tensor, and a NumPy array as shown below:
from torchvision.datasets import OxfordIIITPet
import matplotlib.pyplot as plt

origin_data = OxfordIIITPet(
    root="data",
    transform=None
)

plt.figure(figsize=[7, 9])
plt.title(label="origin_data", fontsize=14)
plt.imshow(X=origin_data[0][0])
plt.show()
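As a quick sanity check (not part of the original steps, assuming origin_data is already loaded as above), you can confirm that each dataset item is an (image, label) pair whose first element is a PIL Image:

from PIL import Image

image, label = origin_data[0]
print(isinstance(image, Image.Image))  # True: the raw item is a PIL Image
print(image.mode)                      # RGB
print(image.size)                      # (width, height) tuple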
PIL Image[H, W, C] => PyTorch Tensor[C, H, W] => NumPy Array[H, W, C]:
from torchvision.datasets import OxfordIIITPet
from torchvision.transforms import PILToTensor
import numpy as np

origin_data = OxfordIIITPet(
    root="data",
    transform=None
)

ptt = PILToTensor()
pytorchimagetensor = ptt(origin_data[0][0])
# tensor([[[ 37, 35, 36, ..., 247, 249, 249],
# [ 35, 35, 37, ..., 246, 248, 249],
# ...,
# [ 28, 28, 27, ..., 59, 65, 76]],
# [[ 20, 18, 19, ..., 248, 248, 248],
# [ 18, 18, 20, ..., 247, 247, 248],
# ...,
# [ 27, 27, 27, ..., 94, 106, 117]],
# [[ 12, 10, 11, ..., 253, 253, 253],
# [ 10, 10, 12, ..., 251, 252, 253],
# ...,
# [ 35, 35, 35, ..., 214, 232, 223]]], dtype=torch.uint8)
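# Three ways to get an [H, W, C] NumPy array from the [C, H, W] tensor;
# each produces the same uint8 values, and only the last assignment is kept: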
numpyimagearray = pytorchimagetensor.permute(1, 2, 0).numpy()
numpyimagearray = np.array(object=pytorchimagetensor.permute(1, 2, 0))
numpyimagearray = np.asarray(pytorchimagetensor.permute(1, 2, 0))
numpyimagearray
# array([[[ 37 20 12]
# [ 35 18 10]
# ...
# [249 248 253]]
# [[ 35 18 10]
# [ 35 18 10]
# ...
# [249 248 253]]
# [[ 35 18 10]
# [ 36 19 11]
# ...
# [250 249 254]]
# ...
# [[ 5 6 24]
# [ 4 5 23]
# ...
# [ 69 110 224]]
# [[ 4 3 19]
# [ 3 2 18]
# ...
# [ 64 108 229]]
# [[ 28 27 35]
# [ 28 27 35]
# ...
# [ 76 117 223]]], dtype=uint8)
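As a rough side note (not part of the original steps), the uint8 [H, W, C] NumPy array can be turned back into a PIL Image with Image.fromarray(), since it is already in RGB channel order:

from PIL import Image

pilimage = Image.fromarray(numpyimagearray)  # uint8 [H, W, C] RGB array -> PIL Image
pilimage.size                                # (width, height)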
PIL Image[H, W, C] => NumPy Array[H, W, C] => PyTorch Tensor[C, H, W]:
from torchvision.datasets import OxfordIIITPet
import torch
import numpy as np

origin_data = OxfordIIITPet(
    root="data",
    transform=None
)
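# Two equivalent ways to convert the PIL Image to an [H, W, C] NumPy array;
# only the last assignment is kept: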
numpyimagearray = np.array(object=origin_data[0][0])
numpyimagearray = np.asarray(origin_data[0][0])
numpyimagearray
# array([[[ 37 20 12]
# [ 35 18 10]
# ...
# [249 248 253]]
# [[ 35 18 10]
# [ 35 18 10]
# ...
# [249 248 253]]
# [[ 35 18 10]
# [ 36 19 11]
# ...
# [250 249 254]]
# ...
# [[ 5 6 24]
# [ 4 5 23]
# ...
# [ 69 110 224]]
# [[ 4 3 19]
# [ 3 2 18]
# ...
# [ 64 108 229]]
# [[ 28 27 35]
# [ 28 27 35]
# ...
# [ 76 117 223]]], dtype=uint8)
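# Two ways to build a [C, H, W] tensor from the [H, W, C] NumPy array
# (torch.from_numpy shares memory with the array, torch.tensor copies it):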
pytorchimagetensor = torch.from_numpy(numpyimagearray).permute(dims=[2, 0, 1])
pytorchimagetensor = torch.tensor(numpyimagearray).permute(dims=[2, 0, 1])
pytorchimagetensor
# tensor([[[ 37, 35, 36, ..., 247, 249, 249],
# [ 35, 35, 37, ..., 246, 248, 249],
# ...,
# [ 28, 28, 27, ..., 59, 65, 76]],
# [[ 20, 18, 19, ..., 248, 248, 248],
# [ 18, 18, 20, ..., 247, 247, 248],
# ...,
# [ 27, 27, 27, ..., 94, 106, 117]],
# [[ 12, 10, 11, ..., 253, 253, 253],
# [ 10, 10, 12, ..., 251, 252, 253],
# ...,
# [ 35, 35, 35, ..., 214, 232, 223]]], dtype=torch.uint8)
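To close the loop, here is a minimal sketch (assuming pytorchimagetensor is a uint8 tensor in [C, H, W] layout, as above) that converts the PyTorch tensor back to a PIL Image with torchvision's ToPILImage:

from torchvision.transforms import ToPILImage

tpi = ToPILImage()
pilimage = tpi(pytorchimagetensor)  # uint8 [C, H, W] tensor -> PIL Image (mode RGB)
pilimage.size                       # (width, height)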