1. 分析业务,明白业务主要流程 2. 将业务数据,转化为深度学习批处理格式 3. 输入,这个通常比较固定 4. 输出:这个有较强的业务依赖 4.1 分类业务,输出类似这样:[[是/否]] 4.2 多任务,输出类似这样:[[label1, label2, label3]](每个label可取是/否), 即模型的输出是一个向量,向量的每个元素都可以是一个业务指标
MNIST数据集下载 from torchvision import datasets mnist = datasets.MNIST(root="MNIST_reader",download=True) MNIST数据集说明 图像共7W个,10个类别,分别是0-9 训练集6W,测试集1W test.csv中第一行记录的内容: datasets\MNIST\test\0\1.jpeg,0 这表示路径为datasets\MNIST\test\0\1.jpeg的图片对应的标签是0,即该图片的内容是手写的数字0 图像特点: 1. 类别简明,只有十类, 2. 每个类别的内容也简单,只是数字,数字的笔画相对较少 3. 手写,手写不像印刷体那样像复制似的,大致相同但细节不同 4. 每个类别的样本足够,6W个0-9的数字,平均每类6000个样本; 在如此简单的笔画下,每类都准备了6000个样本,这才叫足够; 并不是随便一个其他业务含义的类别准备6000样本就叫足够... 5. 样本数量均衡 6. 每个图像中的数字都位于图像的正中间,无残缺,即每个数字都很完整 7. 每个图像很小,内容也简单 8. 各个图像的大小基本一样,言外之意,不用再对图像进行剪裁了 9. 图像是灰度图 综上所述,这是一套经过精巧设计,较好的,很干净的 可以验证模型能力的小型图像数据集 |
minist $ ls -ltrh total 2.4M -rwxrwxrwx 1 xt xt 2.1M Mar 2 2023 train.csv -rwxrwxrwx 1 xt xt 332K Mar 2 2023 test.csv drwxrwxrwx 1 xt xt 4.0K Jan 11 15:21 test drwxrwxrwx 1 xt xt 4.0K Jan 11 15:24 train $ less test.csv datasets\MNIST\test\0\1.jpeg,0 datasets\MNIST\test\0\10.jpeg,0 datasets\MNIST\test\0\100.jpeg,0 datasets\MNIST\test\0\101.jpeg,0 datasets\MNIST\test\0\102.jpeg,0 datasets\MNIST\test\0\103.jpeg,0 datasets\MNIST\test\0\104.jpeg,0 datasets\MNIST\test\0\105.jpeg,0 datasets\MNIST\test\0\106.jpeg,0 datasets\MNIST\test\0\107.jpeg,0 datasets\MNIST\test\0\108.jpeg,0 |
# -- Process & save --
# The raw MNIST tree expands to ~9G on disk because 70k small images scattered
# as individual files waste space; stored as serialized bytes they total ~300M:
#   (base) xt@ai:/opt/tpf/aiwks/datasets/MNIST_reader/datasets$ du -sh MNIST
#   8.6G    MNIST
#   (base) xt@ai:/opt/tpf/aiwks/datasets/MNIST_reader/datasets$ ls -ltrh
#   total 18M
#   drwxr-xr-x 4 xt xt 128K  3月  2 20:34 MNIST
#   -rwxr-xr-x 1 xt xt  18M  3月  4 16:08 MNIST.tar.gz
# Personal-PC storage is limited, hence convert and save.
import os
import torch
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms


class ImgSet(Dataset):
    """Handwritten-digit (MNIST) dataset indexed by a CSV file.

    Each CSV line has the form ``<relative\\path\\to\\img.jpeg>,<label>``
    (Windows-style separators, as produced on the original machine).
    """

    def __init__(self, fil=None, pfil="/opt/tpf/aiwks/datasets/MNIST_reader", img_size=(32, 32)):
        """Hyper-parameter initialization.

        Args:
            fil: path of the CSV index file (train.csv / test.csv); required.
            pfil: parent directory of all dataset directories.
            img_size: (H, W) that every image is resized to.

        Raises:
            ValueError: if ``fil`` is not supplied (previously this crashed
                inside ``open()`` with an opaque TypeError).
        """
        if fil is None:
            raise ValueError("fil (the CSV index file path) is required")
        with open(file=fil, mode="r", encoding="utf8") as f:
            images = [line.strip().split(",") for line in f]
        for img in images:
            # Runs on Linux, so convert the Windows path separators.
            img[0] = os.path.join(pfil, img[0].replace("\\", "/"))
        self.images = images
        # Image -> Tensor pipeline: grayscale, resize, float tensor in [0, 1].
        self.trans = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize(size=img_size),
            transforms.ToTensor(),
        ])

    def __getitem__(self, idx):
        """Read one image and return (tensor, label) with an int64 label."""
        img_path, label = self.images[idx]
        img = Image.open(fp=img_path)
        return self.trans(img), torch.tensor(data=int(label)).long()

    def __len__(self):
        """Dataset size."""
        return len(self.images)


# train_dataset = ImgSet(fil="/opt/tpf/aiwks/datasets/MNIST_reader/train.csv", img_size=(32, 32))
# print(len(train_dataset))  # 60000
# test_dataset = ImgSet(fil="/opt/tpf/aiwks/datasets/MNIST_reader/test.csv", img_size=(32, 32))
# print(len(test_dataset))   # 10000

from ai.box.d1 import pkl_load, pkl_save


def save_dataset(dataset, save_file):
    """Materialize every (x, y) sample of ``dataset`` into one pickle file."""
    img_list = [(x, y) for x, y in dataset]
    pkl_save(data=img_list, file_path=save_file)


# 267M
# save_dataset(train_dataset, save_file="/opt/tpf/aiwks/datasets/MNIST_reader/img0-9_train.pkl")
# 45M
# save_dataset(test_dataset, save_file="/opt/tpf/aiwks/datasets/MNIST_reader/img0-9_test.pkl")
# -- Load test --
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets
from tpf.params import ImgPath
from tpf import pkl_load, pkl_save


# Data-loading test
class MyDataset(Dataset):
    """Handwritten-digit dataset served from a pre-pickled sample list."""

    def __init__(self, fil):
        """Load the pickled list of (image_tensor, label) pairs from ``fil``."""
        self.img_list = pkl_load(file_path=fil)

    def __getitem__(self, idx):
        """Return the (image, label) pair stored at position ``idx``."""
        sample = self.img_list[idx]
        return sample[0], sample[1]

    def __len__(self):
        """Number of samples."""
        return len(self.img_list)


train_dataset = MyDataset(fil=ImgPath.mnist_data_train)
print(len(train_dataset))  # 60000

test_dataset = MyDataset(fil=ImgPath.mnist_data_test)
print(len(test_dataset))   # 10000

# Training-set loader
train_dataloader = DataLoader(dataset=train_dataset, batch_size=512, shuffle=True)
# Test-set loader
test_dataloader = DataLoader(dataset=test_dataset, batch_size=512, shuffle=False)

for X, y in train_dataloader:
    print(X.shape, y.shape)
    break
# One batch = 512 samples, one value per pixel, per-image shape [32, 32]:
# torch.Size([512, 1, 32, 32]) torch.Size([512])

for X, y in train_dataloader:
    print(X[0][0], y[0])
    break
# tensor([[0., 0., 0.,  ..., 0., 0., 0.],
#         [0., 0., 0.,  ..., 0., 0., 0.],
#         ...,
#         [0., 0., 0.,  ..., 0., 0., 0.]]) tensor(7)
|
|
The CIFAR-10 dataset The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize

""" 打包数据 """

# Preprocessing: resize to 224x224, tensorize, normalize each channel to [-1, 1].
transforms = Compose(transforms=[Resize(size=(224, 224)),
                                 ToTensor(),
                                 Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])

data_name = "cifar10"

# Training split — downloaded on first run, cached afterwards.
train_dataset = datasets.CIFAR10(root=data_name, train=True, transform=transforms, download=True)
print(len(train_dataset))
# train_dataloader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)

# test_dataloader
test_dataset = datasets.CIFAR10(root=data_name, train=False, transform=transforms, download=True)
print(len(test_dataset))
# test_dataloader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
CIFAR10下载与引用位置 在/opt/tpf目录下执行的python命令,文件将下载于/opt/tpf目录 (base) xt@ai1:/opt/tpf$ /home/xt/anaconda3/bin/python /opt/tpf/aiwks/code/aisty/test/cv/c10.py 将下载的文件copy到 其他目录,在图片所在目录执行,说明文件已下载 (base) xt@ai1:/opt/tpf$ cp -r cifar10/ /opt/tpf/aiwks/datasets/images/ (base) xt@ai1:/opt/tpf$ cd /opt/tpf/aiwks/datasets/images (base) xt@ai1:/opt/tpf/aiwks/datasets/images$ /home/xt/anaconda3/bin/python /opt/tpf/aiwks/code/aisty/test/cv/c10.py Files already downloaded and verified 50000 Files already downloaded and verified 10000 删除该目录下的图像文件,再次执行则会重新下载 (base) xt@ai1:/opt/tpf/aiwks/datasets/images$ rm -rf cifar10/ (base) xt@ai1:/opt/tpf/aiwks/datasets/images$ /home/xt/anaconda3/bin/python /opt/tpf/aiwks/code/aisty/test/cv/c10.py Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to cifar10/cifar-10-python.tar.gz 7%|████████▏ 也就是说,你要在哪个目录执行python命令,就将图像文件COPY到哪个目录,就不需要重新下载了; 如果网速可以,重新下载1分钟左右 保存一下方便后续使用 from ai.box.d1 import pkl_load,pkl_save import os data_base = "/opt/tpf/aiwks/datasets/images" c10 = os.path.join(data_base,"c10_train.pkl") pkl_save(data=train_dataset,file_path=c10) train_dataset = pkl_load(file_path=c10) print(len(train_dataset)) c10_test = os.path.join(data_base,"c10_test.pkl") pkl_save(data=test_dataset,file_path=c10_test) test_dataset = pkl_load(file_path=c10_test) print(len(test_dataset)) |
# -- Default paths --
"""
from torchvision import datasets
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
"""

# NOTE: this section builds the TRAIN loader; the original header wrongly
# said 测试集 (test set) — fixed to 训练集.
""" 打包数据-训练集 """
from tpf.datasets import local_cifar10_train
from torch.utils.data import DataLoader

train_dataset = local_cifar10_train()
train_dataloader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)

""" 打包数据-测试集 """
from tpf.datasets import local_cifar10_test

test_dataset = local_cifar10_test()
test_dataloader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)

# -- Specified paths --
from ai.box.d1 import pkl_load
import os

data_base = "/opt/tpf/aiwks/datasets/images"

c10 = os.path.join(data_base, "c10_train.pkl")
train_dataset = pkl_load(file_path=c10)
print(len(train_dataset))

c10_test = os.path.join(data_base, "c10_test.pkl")
test_dataset = pkl_load(file_path=c10_test)
print(len(test_dataset))

# train_dataloader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
# test_dataloader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)
|
The CIFAR-100 dataset
This dataset is just like the CIFAR-10, except it has **100 classes** containing **600 images each**. There are **500 training images and 100 testing images per class**. The 100 classes in the CIFAR-100 are grouped into 20 superclasses. Each image comes with a "fine" label (the class to which it belongs) and a "coarse" label (the superclass to which it belongs).
展示代码
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize

""" 打包数据 """

# Preprocessing: resize to 224x224, tensorize, normalize each channel to [-1, 1].
transforms = Compose(transforms=[Resize(size=(224, 224)),
                                 ToTensor(),
                                 Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])

data_name = "cifar100"

train_dataset = datasets.CIFAR100(root=data_name, train=True, transform=transforms, download=True)
# print(len(train_dataset))
# train_dataloader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)

# test_dataloader
test_dataset = datasets.CIFAR100(root=data_name, train=False, transform=transforms, download=True)
# print(len(test_dataset))
# test_dataloader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)

# Pickle both splits so later scripts can load them without torchvision.
from ai.box.d1 import pkl_load, pkl_save
import os

data_base = "/opt/tpf/aiwks/datasets/images"

c100 = os.path.join(data_base, "c100_train.pkl")
pkl_save(data=train_dataset, file_path=c100)
train_dataset = pkl_load(file_path=c100)
print(train_dataset)

c100_test = os.path.join(data_base, "c100_test.pkl")
pkl_save(data=test_dataset, file_path=c100_test)
test_dataset = pkl_load(file_path=c100_test)
print(test_dataset)
后续使用
from ai.box.d1 import pkl_load
import os

data_base = "/opt/tpf/aiwks/datasets/images"

# Reload the pickled training split produced by the packaging script.
c100 = os.path.join(data_base, "c100_train.pkl")
train_dataset = pkl_load(file_path=c100)
print(len(train_dataset))  # 50000
print(train_dataset[0])
"""
(tensor([[[ 1.0000,  1.0000,  1.0000,  ...,  0.4275,  0.4275,  0.4275],
          [ 1.0000,  1.0000,  1.0000,  ...,  0.4275,  0.4275,  0.4275],
          [ 1.0000,  1.0000,  1.0000,  ...,  0.4275,  0.4275,  0.4275],
          ...,
          [-0.3176, -0.3176, -0.3176,  ...,  0.0824,  0.0824,  0.0824],
          [-0.3176, -0.3176, -0.3176,  ...,  0.0824,  0.0824,  0.0824],
          [-0.3176, -0.3176, -0.3176,  ...,  0.0824,  0.0824,  0.0824]],

         [[ 1.0000,  1.0000,  1.0000,  ...,  0.5216,  0.5216,  0.5216],
          [ 1.0000,  1.0000,  1.0000,  ...,  0.5216,  0.5216,  0.5216],
          [ 1.0000,  1.0000,  1.0000,  ...,  0.5216,  0.5216,  0.5216],
          ...,
          [-0.0431, -0.0431, -0.0431,  ...,  0.3569,  0.3569,  0.3569],
          [-0.0431, -0.0431, -0.0431,  ...,  0.3569,  0.3569,  0.3569],
          [-0.0431, -0.0431, -0.0431,  ...,  0.3569,  0.3569,  0.3569]],

         [[ 1.0000,  1.0000,  1.0000,  ...,  0.3098,  0.3098,  0.3098],
          [ 1.0000,  1.0000,  1.0000,  ...,  0.3098,  0.3098,  0.3098],
          [ 1.0000,  1.0000,  1.0000,  ...,  0.3098,  0.3098,  0.3098],
          ...,
          [-0.6784, -0.6784, -0.6784,  ..., -0.3804, -0.3804, -0.3804],
          [-0.6784, -0.6784, -0.6784,  ..., -0.3804, -0.3804, -0.3804],
          [-0.6784, -0.6784, -0.6784,  ..., -0.3804, -0.3804, -0.3804]]]), 19)
"""

# Reload the pickled test split.
c100_test = os.path.join(data_base, "c100_test.pkl")
test_dataset = pkl_load(file_path=c100_test)
print(len(test_dataset))  # 10000

from torch.utils.data import DataLoader

train_dataloader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
# test_dataloader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=False)

for X, y in train_dataloader:
    print(X.shape, y.shape)  # torch.Size([32, 3, 224, 224]) torch.Size([32])
    break
CelebA/Img/img_celeba: 明星的上半身或全身图像,大小近似但不一
The CIFAR-10 dataset