from fastai.vision.all import *

# Build DataLoaders from an ImageNet-style folder layout (one subfolder per
# class). Each image gets a random crop/resize on the CPU; normalization with
# the ImageNet statistics is applied per batch.
dls = ImageDataLoaders.from_folder(
    Path("./huskies_vs_wolves/"),
    item_tfms=RandomResizedCrop(128, min_scale=0.35),
    batch_tfms=Normalize.from_stats(*imagenet_stats),
)
dls.show_batch()  # One line to see our data
show_batch makes it easy to see what our data looks like after the transforms have been applied
# Train an xresnet34 from scratch (n_out=2: husky vs wolf) for 5 epochs
# using the one-cycle learning-rate schedule with a peak LR of 0.0015.
learn = Learner(dls, xresnet34(n_out=2), metrics=accuracy)
learn.fit_one_cycle(5, 0.0015)
learn.show_results()  # One line to see predictions
from torchvision import transforms as T

# The same preprocessing written with plain torchvision transforms:
# resize, center-crop, convert to a [0, 1] float tensor, then normalize
# with the ImageNet channel statistics.
transforms_pt = T.Compose([
    T.Resize(256),
    T.CenterCrop(224),
    T.ToTensor(),
    T.Normalize(*imagenet_stats),
])

# Load and transform an image
img = Image.open("./huskies_vs_wolves/train/husky/husky_0.jpeg")
img_transformed = transforms_pt(img)

# Try to look at what we did...
show_image(img_transformed);
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Got range [-2.1007793..2.2489083].
def decode_pt(tensor, mean, std):
    """Decode a normalized PyTorch tensor back to RGB range.

    Reverses a channel-wise normalization (x * std + mean) and rescales the
    resulting [0, 1] floats to uint8 [0, 255] so the image can be displayed.

    Args:
        tensor: normalized image tensor, channels first (C, H, W).
        mean: per-channel means used for the original normalization.
        std: per-channel standard deviations used for the original normalization.

    Returns:
        A uint8 tensor with values clamped to [0, 255].
    """
    out = tensor.clone()  # Clone to avoid modifying original
    for t, m, s in zip(out, mean, std):
        t.mul_(s).add_(m)  # Denormalize one channel in place
    out = out.mul(255).clamp(0, 255).byte()  # Scale back to RGB
    return out


img_decoded = decode_pt(img_transformed, *imagenet_stats)
show_image(img_decoded);
# A fastai Pipeline for segmentation: transforms are type-dispatched, so each
# one is applied only where it makes sense (e.g. Normalize touches the image
# but leaves the integer mask alone).
transforms_ft = Pipeline([
    load_img_msk,                           # <-- New load func for img and mask
    RandomResizedCrop(200),                 # Applied to both img and mask
    ToTensor(),                             # Applied to both img and mask
    IntToFloatTensor(),                     # Only applied to img
    Normalize.from_stats(*imagenet_stats),  # Only applied to img
])
out = transforms_ft(fn)
out
show_images((out[0][0], out[1]))
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Got range [-1.8096584..2.64].