PyTorch 1.7.1

          Training Code
          The following is example MNIST image-classification code based on the PyTorch framework; click here to download the dataset.
          For single-node training (number of compute nodes equal to 1), the example code is as follows:

          import argparse
          import torch
          import torch.nn as nn
          import torch.nn.functional as F
          import torch.optim as optim
          import torch.utils.data as data
          from torchvision import transforms
          import codecs
          import errno
          import gzip
          import numpy as np
          import os
          from PIL import Image
          # Training settings
          parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
          parser.add_argument('--train-dir', type=str, default='./train_data',
                              help='input data dir for training (default: ./train_data)')
          parser.add_argument('--test-dir', type=str, default='./test_data',
                              help='input data dir for test (default: ./test_data)')
          parser.add_argument('--output-dir', type=str, default='./output',
                              help='output dir for custom job (default: ./output)')                    
          parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                              help='input batch size for training (default: 64)')
          parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                              help='input batch size for testing (default: 64)')
          parser.add_argument('--epochs', type=int, default=10, metavar='N',
                              help='number of epochs to train (default: 10)')
          parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                              help='learning rate (default: 0.01)')
          parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                              help='SGD momentum (default: 0.5)')
          parser.add_argument('--no-cuda', action='store_true', default=False,
                              help='disables CUDA training')
          parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                              help='how many batches to wait before logging training status')
          # Define the Dataset class for the MNIST data
          class MNIST(data.Dataset):
              """
              MNIST dataset
              """
              training_file = 'training.pt'
              test_file = 'test.pt'
              classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
                         '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
              def __init__(self, root, train=True, transform=None, target_transform=None):
                  self.root = os.path.expanduser(root)
                  self.transform = transform
                  self.target_transform = target_transform
                  self.train = train  # training set or test set
                  self.preprocess(root, train, False)
                  if self.train:
                      data_file = self.training_file
                  else:
                      data_file = self.test_file
                  self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
                  
              def __getitem__(self, index):
                  """
                  Args:
                      index (int): Index
                  Returns:
                      tuple: (image, target) where target is index of the target class.
                  """
                  img, target = self.data[index], int(self.targets[index])
                  # doing this so that it is consistent with all other datasets
                  # to return a PIL Image
                  img = Image.fromarray(img.numpy(), mode='L')
                  if self.transform is not None:
                      img = self.transform(img)
                  if self.target_transform is not None:
                      target = self.target_transform(target)
                      
                  return img, target
              def __len__(self):
                  return len(self.data)
              
              @property
              def raw_folder(self):
                  """
                  raw folder
                  """
                  return os.path.join('/tmp', 'raw')
              
              @property
              def processed_folder(self):
                  """
                  processed folder
                  """
                  return os.path.join('/tmp', 'processed')
                  
              # data preprocessing
              def preprocess(self, train_dir, train, remove_finished=False):
                  """
                  preprocess
                  """
                  makedir_exist_ok(self.raw_folder)
                  makedir_exist_ok(self.processed_folder)
                  train_list = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz']
                  test_list = ['t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
                  zip_list = train_list if train else test_list
                  for zip_file in zip_list:
                      print('Extracting {}'.format(zip_file))
                      zip_file_path = os.path.join(train_dir, zip_file)
                      raw_folder_path = os.path.join(self.raw_folder, zip_file)
                      with open(raw_folder_path.replace('.gz', ''), 'wb') as out_f, gzip.GzipFile(zip_file_path) as zip_f:
                          out_f.write(zip_f.read())
                      if remove_finished:
                          os.unlink(zip_file_path)
                  if train:
                      training_set = (
                          read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
                          read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
                      )
                      with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
                          torch.save(training_set, f)
                  else:
                      test_set = (
                          read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
                          read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
                      )
                      with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
                          torch.save(test_set, f)
                  
          def get_int(b):
              """
              get int
              """
              return int(codecs.encode(b, 'hex'), 16)
          def read_label_file(path):
              """
              read label file
              """
              with open(path, 'rb') as f:
                  data = f.read()
                  assert get_int(data[:4]) == 2049
                  length = get_int(data[4:8])
                  parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
                  return torch.from_numpy(parsed).view(length).long()
          def read_image_file(path):
              """
              read image file
              """
              with open(path, 'rb') as f:
                  data = f.read()
                  assert get_int(data[:4]) == 2051
                  length = get_int(data[4:8])
                  num_rows = get_int(data[8:12])
                  num_cols = get_int(data[12:16])
                  parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
                  return torch.from_numpy(parsed).view(length, num_rows, num_cols)
          def makedir_exist_ok(dirpath):
              """
              Python2 support for os.makedirs(.., exist_ok=True)
              """
              try:
                  os.makedirs(dirpath)
              except OSError as e:
                  if e.errno == errno.EEXIST:
                      pass
                  else:
                      raise
          # Define the network model
          class Net(nn.Module):
              """
              Net
              """
              def __init__(self):
                  super(Net, self).__init__()
                  self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
                  self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                  self.conv2_drop = nn.Dropout2d()
                  self.fc1 = nn.Linear(320, 50)
                  self.fc2 = nn.Linear(50, 10)
              def forward(self, x):
                  """
                  forward
                  """
                  x = F.relu(F.max_pool2d(self.conv1(x), 2))
                  x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
                  x = x.view(-1, 320)
                  x = F.relu(self.fc1(x))
                  x = F.dropout(x, training=self.training)
                  x = self.fc2(x)
                  return F.log_softmax(x, dim=1)  # dim=1: log-softmax over the class dimension
          def train(epoch):
              """
              train
              """
              model.train()
              for batch_idx, (data, target) in enumerate(train_loader):
                  if args.cuda:
                      data, target = data.cuda(), target.cuda()
                  optimizer.zero_grad()
                  output = model(data)  # get predictions
                  loss = F.nll_loss(output, target)  # compute the loss
                  loss.backward()
                  optimizer.step()       
                  if batch_idx % args.log_interval == 0:
                      print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                          epoch, batch_idx, len(train_loader),
                          100. * batch_idx / len(train_loader), loss.item()))
                      
          def test():
              """
              test
              """
              model.eval()
              test_loss = 0.
              test_accuracy = 0.
              for data, target in test_loader:
                  if args.cuda:
                      data, target = data.cuda(), target.cuda()
                  output = model(data)
                  # sum up batch loss
                  test_loss += F.nll_loss(output, target, reduction='sum').item()
                  # get the index of the max log-probability
                  pred = output.data.max(1, keepdim=True)[1]
                  test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()
              # Divide by the dataset size; len(test_loader) * batch_size overcounts when the last batch is smaller.
              test_loss /= len(test_loader.dataset)
              test_accuracy /= len(test_loader.dataset)
              print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
                  test_loss, 100. * test_accuracy))
          def save():
              """
              save
              """
              if not os.path.exists(args.output_dir):
                  os.makedirs(args.output_dir)
              # Save the model
              torch.save(model.state_dict(), os.path.join(args.output_dir, 'model.pkl'))
          if __name__ == '__main__':
              args = parser.parse_args()
              args.cuda = not args.no_cuda and torch.cuda.is_available()
              # If there is no test set, use the training set for validation
              if not os.path.exists(args.test_dir) or not os.listdir(args.test_dir):
                  args.test_dir = args.train_dir
              # Convert the data from PIL.Image / numpy.ndarray to torch.FloatTensor
              trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
              train_set = MNIST(root=args.train_dir, train=True, transform=trans)
              test_set = MNIST(root=args.test_dir, train=False, transform=trans)
              # Define the data loaders
              train_loader = torch.utils.data.DataLoader(
                              dataset=train_set,
                              batch_size=args.batch_size,
                              shuffle=True)
              test_loader = torch.utils.data.DataLoader(
                              dataset=test_set,
                              batch_size=args.test_batch_size,
                              shuffle=False)
              # Build the model
              model = Net()
              if args.cuda:
                  # Move model to GPU.
                  model.cuda()
              print(model)
              # Choose the optimizer
              optimizer = optim.SGD(model.parameters(), lr=args.lr,
                                    momentum=args.momentum)
              for epoch in range(1, args.epochs + 1):
                  train(epoch)
                  test()
                  save()
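
          After training, save() writes the network weights to model.pkl via torch.save(model.state_dict(), ...). As a quick local sanity check (a minimal sketch, not part of the BML workflow; it assumes the Net class above is in scope and that --output-dir kept its ./output default), the saved state_dict can be reloaded like this:

          import torch
          # Rebuild the network and load the weights produced by save() above.
          model = Net()
          model.load_state_dict(torch.load('./output/model.pkl', map_location='cpu'))
          model.eval()
          # Push a dummy 1x1x28x28 batch through the network to verify shapes.
          with torch.no_grad():
              logits = model(torch.zeros(1, 1, 28, 28))
          print(logits.shape)  # expected: torch.Size([1, 10])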

          For distributed training (number of compute nodes greater than 1), the example code is as follows:
          Note: this demo distributed program does not shard the dataset files themselves and is provided for reference only.

          import argparse
          import torch
          import torch.nn as nn
          import torch.nn.functional as F
          import torch.optim as optim
          import torch.utils.data as data
          from torchvision import datasets, transforms
          import codecs
          import errno
          import gzip
          import numpy as np
          import os
          from PIL import Image
          import torch.multiprocessing as mp
          import torch.utils.data.distributed
          import horovod.torch as hvd
          # Training settings
          parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
          parser.add_argument('--train-dir', type=str, default='./train_data',
                              help='input data dir for training (default: ./train_data)')
          parser.add_argument('--test-dir', type=str, default='./test_data',
                              help='input data dir for test (default: ./test_data)')
          parser.add_argument('--output-dir', type=str, default='./output',
                              help='output dir for custom job (default: ./output)')                    
          parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                              help='input batch size for training (default: 64)')
          parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',
                              help='input batch size for testing (default: 64)')
          parser.add_argument('--epochs', type=int, default=10, metavar='N',
                              help='number of epochs to train (default: 10)')
          parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                              help='learning rate (default: 0.01)')
          parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                              help='SGD momentum (default: 0.5)')
          parser.add_argument('--no-cuda', action='store_true', default=False,
                              help='disables CUDA training')
          parser.add_argument('--seed', type=int, default=42, metavar='S',
                              help='random seed (default: 42)')
          parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                              help='how many batches to wait before logging training status')
          parser.add_argument('--fp16-allreduce', action='store_true', default=False,
                              help='use fp16 compression during allreduce')
          parser.add_argument('--use-adasum', action='store_true', default=False,
                              help='use adasum algorithm to do reduction')
          parser.add_argument('--gradient-predivide-factor', type=float, default=1.0,
                              help='apply gradient predivide factor in optimizer (default: 1.0)')
          # Define the Dataset class for the MNIST data
          class MNIST(data.Dataset):
              """
              MNIST dataset
              """
              training_file = 'training.pt'
              test_file = 'test.pt'
              classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
                         '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
              def __init__(self, root, train=True, transform=None, target_transform=None):
                  self.root = os.path.expanduser(root)
                  self.transform = transform
                  self.target_transform = target_transform
                  self.train = train  # training set or test set
                  self.preprocess(root, train, False)
                  if self.train:
                      data_file = self.training_file
                  else:
                      data_file = self.test_file
                  self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
                  
              def __getitem__(self, index):
                  """
                  Args:
                      index (int): Index
                  Returns:
                      tuple: (image, target) where target is index of the target class.
                  """
                  img, target = self.data[index], int(self.targets[index])
                  # doing this so that it is consistent with all other datasets
                  # to return a PIL Image
                  img = Image.fromarray(img.numpy(), mode='L')
                  if self.transform is not None:
                      img = self.transform(img)
                  if self.target_transform is not None:
                      target = self.target_transform(target)
                      
                  return img, target
              def __len__(self):
                  return len(self.data)
              
              @property
              def raw_folder(self):
                  """
                  raw folder
                  """
                  return os.path.join('/tmp', 'raw')
              
              @property
              def processed_folder(self):
                  """
                  processed folder
                  """
                  return os.path.join('/tmp', 'processed')
                  
              # data preprocessing
              def preprocess(self, train_dir, train, remove_finished=False):
                  """
                  preprocess
                  """
                  makedir_exist_ok(self.raw_folder)
                  makedir_exist_ok(self.processed_folder)
                  train_list = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz']
                  test_list = ['t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']
                  zip_list = train_list if train else test_list
                  for zip_file in zip_list:
                      print('Extracting {}'.format(zip_file))
                      zip_file_path = os.path.join(train_dir, zip_file)
                      raw_folder_path = os.path.join(self.raw_folder, zip_file)
                      with open(raw_folder_path.replace('.gz', ''), 'wb') as out_f, gzip.GzipFile(zip_file_path) as zip_f:
                          out_f.write(zip_f.read())
                      if remove_finished:
                          os.unlink(zip_file_path)
                  if train:
                      training_set = (
                          read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
                          read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
                      )
                      with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
                          torch.save(training_set, f)
                  else:
                      test_set = (
                          read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
                          read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
                      )
                      with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
                          torch.save(test_set, f)
          def get_int(b):
              """
              get int
              """
              return int(codecs.encode(b, 'hex'), 16)
          def read_label_file(path):
              """
              read label file
              """
              with open(path, 'rb') as f:
                  data = f.read()
                  assert get_int(data[:4]) == 2049
                  length = get_int(data[4:8])
                  parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
                  return torch.from_numpy(parsed).view(length).long()
          def read_image_file(path):
              """
              read image file
              """
              with open(path, 'rb') as f:
                  data = f.read()
                  assert get_int(data[:4]) == 2051
                  length = get_int(data[4:8])
                  num_rows = get_int(data[8:12])
                  num_cols = get_int(data[12:16])
                  parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
                  return torch.from_numpy(parsed).view(length, num_rows, num_cols)
          def makedir_exist_ok(dirpath):
              """
              Python2 support for os.makedirs(.., exist_ok=True)
              """
              try:
                  os.makedirs(dirpath)
              except OSError as e:
                  if e.errno == errno.EEXIST:
                      pass
                  else:
                      raise
          # Define the network model
          class Net(nn.Module):
              """
              Net
              """
              def __init__(self):
                  super(Net, self).__init__()
                  self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
                  self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                  self.conv2_drop = nn.Dropout2d()
                  self.fc1 = nn.Linear(320, 50)
                  self.fc2 = nn.Linear(50, 10)
              def forward(self, x):
                  """
                  forward
                  """
                  x = F.relu(F.max_pool2d(self.conv1(x), 2))
                  x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
                  x = x.view(-1, 320)
                  x = F.relu(self.fc1(x))
                  x = F.dropout(x, training=self.training)
                  x = self.fc2(x)
                  return F.log_softmax(x, dim=1)  # dim=1: log-softmax over the class dimension
          def train(epoch):
              """
              train
              """
              model.train()
              # Horovod: set epoch to sampler for shuffling.
              train_sampler.set_epoch(epoch)
              for batch_idx, (data, target) in enumerate(train_loader):
                  if args.cuda:
                      data, target = data.cuda(), target.cuda()
                  optimizer.zero_grad()
                  output = model(data)
                  loss = F.nll_loss(output, target)
                  loss.backward()
                  optimizer.step()
                  if batch_idx % args.log_interval == 0:
                      # Horovod: use train_sampler to determine the number of examples in
                      # this worker's partition.
                      print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                          epoch, batch_idx * len(data), len(train_sampler),
                          100. * batch_idx / len(train_loader), loss.item()))
          def metric_average(val, name):
              """
              metric average
              """
              tensor = torch.tensor(val)
              avg_tensor = hvd.allreduce(tensor, name=name)
              return avg_tensor.item()
          def test():
              """
              test
              """
              model.eval()
              test_loss = 0.
              test_accuracy = 0.
              for data, target in test_loader:
                  if args.cuda:
                      data, target = data.cuda(), target.cuda()
                  output = model(data)
                  # sum up batch loss
                  test_loss += F.nll_loss(output, target, reduction='sum').item()
                  # get the index of the max log-probability
                  pred = output.data.max(1, keepdim=True)[1]
                  test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()
              # Horovod: use test_sampler to determine the number of examples in
              # this worker's partition.
              test_loss /= len(test_sampler)
              test_accuracy /= len(test_sampler)
              # Horovod: average metric values across workers.
              test_loss = metric_average(test_loss, 'avg_loss')
              test_accuracy = metric_average(test_accuracy, 'avg_accuracy')
              # Horovod: print output only on first rank.
              if hvd.rank() == 0:
                  print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
                      test_loss, 100. * test_accuracy))
          def save():
              """
              save
              """
              if not os.path.exists(args.output_dir):
                  os.makedirs(args.output_dir)
              # Save the model
              # Horovod: save model only on first rank.
              if hvd.rank() == 0:
                  torch.save(model.state_dict(), os.path.join(args.output_dir, 'model.pkl'))
          if __name__ == '__main__':
              args = parser.parse_args()
              args.cuda = not args.no_cuda and torch.cuda.is_available()
              # Horovod: initialize library.
              hvd.init()
              torch.manual_seed(args.seed)
              if args.cuda:
                  # Horovod: pin GPU to local rank.
                  torch.cuda.set_device(hvd.local_rank())
                  torch.cuda.manual_seed(args.seed)
              # Horovod: limit # of CPU threads to be used per worker.
              torch.set_num_threads(1)
              kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
              # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
              # issues with Infiniband implementations that are not fork-safe
              if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and
                      mp._supports_context and 'forkserver' in mp.get_all_start_methods()):
                  kwargs['multiprocessing_context'] = 'forkserver'
              
              # If there is no test set, use the training set for validation
              if not os.path.exists(args.test_dir) or not os.listdir(args.test_dir):
                  args.test_dir = args.train_dir
              # Convert the data from PIL.Image / numpy.ndarray to torch.FloatTensor
              trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
              train_set = MNIST(root=args.train_dir, train=True, transform=trans)
              test_set = MNIST(root=args.test_dir, train=False, transform=trans)
              # Horovod: use DistributedSampler to partition the training data.
              train_sampler = torch.utils.data.distributed.DistributedSampler(
                  train_set, num_replicas=hvd.size(), rank=hvd.rank())
              # Horovod: use DistributedSampler to partition the test data.
              test_sampler = torch.utils.data.distributed.DistributedSampler(
                  test_set, num_replicas=hvd.size(), rank=hvd.rank())
              # Define the data loaders
              train_loader = torch.utils.data.DataLoader(
                              dataset=train_set, 
                              batch_size=args.batch_size, 
                              sampler=train_sampler, 
                              **kwargs)
              test_loader = torch.utils.data.DataLoader(
                              dataset=test_set, 
                              batch_size=args.test_batch_size,
                              sampler=test_sampler, 
                              **kwargs)
              model = Net()
              # By default, Adasum doesn't need scaling up learning rate.
              lr_scaler = hvd.size() if not args.use_adasum else 1
              if args.cuda:
                  # Move model to GPU.
                  model.cuda()
                  # If using GPU Adasum allreduce, scale learning rate by local_size.
                  if args.use_adasum and hvd.nccl_built():
                      lr_scaler = hvd.local_size()
              # Horovod: scale learning rate by lr_scaler.
              optimizer = optim.SGD(model.parameters(), lr=args.lr * lr_scaler,
                                    momentum=args.momentum)
              # Horovod: broadcast parameters & optimizer state.
              hvd.broadcast_parameters(model.state_dict(), root_rank=0)
              hvd.broadcast_optimizer_state(optimizer, root_rank=0)
              # Horovod: (optional) compression algorithm.
              compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
              # Horovod: wrap optimizer with DistributedOptimizer.
              optimizer = hvd.DistributedOptimizer(optimizer,
                                                   named_parameters=model.named_parameters(),
                                                   compression=compression,
                                                   op=hvd.Adasum if args.use_adasum else hvd.Average,
                                                   gradient_predivide_factor=args.gradient_predivide_factor)
              for epoch in range(1, args.epochs + 1):
                  train(epoch)
                  test()
                  save()
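
          On BML, the platform launches the worker processes for a distributed job automatically. Outside the platform, a Horovod script like this one is typically started with Horovod's launcher, for example horovodrun -np 4 python train.py; the script name and process count here are illustrative assumptions, not BML requirements.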

          Inference Code
          When publishing a PyTorch model to the model repository, you must upload the custom code used to start the service and, in the Python module named by the main file, implement three functions: model loading (model_fn), request preprocessing (input_fn), and prediction postprocessing (output_fn).
          Example code:

          #!/usr/bin/env python
          # -*- coding: utf-8 -*-
          """
          @license: Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved.
          @desc: Example image prediction algorithm
          """
          import json
          import logging
          import torch
          import torch.nn as nn
          import torch.nn.functional as F
          import base64
          from PIL import Image
          from io import BytesIO
          from torchvision import transforms
          MODEL_FILE_NAME = 'model.pkl'  # model file name
          def get_image_transform():
              """Get the transform used for image preprocessing.
              Returns:
                  torchvision.transforms.Compose
              """
              trans = transforms.Compose([transforms.Resize((28, 28)),
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.5,), (1.0,))])
              return trans
          def model_fn(model_dir):
              """Load the model.
              Args:
                  model_dir: model directory; it holds the files produced under the output path chosen for the custom job
              Returns:
                  the loaded model object
              """
              # Note: this architecture must match the one whose state_dict was saved.
              # The MLP below is a standalone illustration and differs from the conv net
              # in the training example above.
              class Net(nn.Module):
                  def __init__(self):
                      super(Net, self).__init__()
                      self.fc1 = nn.Linear(28 * 28, 500)
                      self.fc2 = nn.Linear(500, 256)
                      self.fc3 = nn.Linear(256, 10)
                  def forward(self, x):
                      x = x.view(-1, 28 * 28)
                      x = F.relu(self.fc1(x))
                      x = F.relu(self.fc2(x))
                      x = self.fc3(x)
                      return x
                  def name(self):
                      return "myNet"
              model = Net()
              meta_info_path = "%s/%s" % (model_dir, MODEL_FILE_NAME)
              device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
              model.load_state_dict(torch.load(meta_info_path, map_location=device))
              model.to(device)
              logging.info("device type: " + str(device))
              return model
          def input_fn(request):
              """Format the request into the input that prediction expects.
              Args:
                  request: the JSON body of the API request
              Returns:
                  the input data needed for prediction, usually a tensor
              """
              instances = request['instances']
              transform_composes = get_image_transform()
              arr_tensor_data = []
              for instance in instances:
                  decoded_data = base64.b64decode(instance['data'].encode('utf8'))
                  byte_stream = BytesIO(decoded_data)
                  roiImg = Image.open(byte_stream)
                  target_data = transform_composes(roiImg)
                  arr_tensor_data.append(target_data)
              tensor_data = torch.stack(arr_tensor_data, dim=0)
              return tensor_data
          def output_fn(predict_result):
              """Format the output.
              Args:
                  predict_result: the prediction result
              Returns:
                  the formatted prediction result; it must be JSON-serializable so the API can return it
              """
              js_str = None
              if isinstance(predict_result, torch.Tensor):
                  list_prediction = predict_result.detach().cpu().numpy().tolist()
                  js_str = json.dumps(list_prediction)
              return js_str
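
          For reference, below is a minimal local sketch of how these three functions chain together. The call sequence mimics what the serving framework is expected to do but is an assumption, digit.png is a hypothetical input image, and ./output is assumed to hold a model.pkl that matches the Net defined in model_fn:

          import base64
          import torch
          # Load the model from the training job's output directory (assumed to be ./output).
          model = model_fn('./output')
          # Build a request whose 'data' field is a base64-encoded image, the shape input_fn expects.
          with open('digit.png', 'rb') as f:
              b64_image = base64.b64encode(f.read()).decode('utf8')
          request = {'instances': [{'data': b64_image}]}
          tensor = input_fn(request)
          # Move the batch to the model's device before predicting.
          tensor = tensor.to(next(model.parameters()).device)
          with torch.no_grad():
              prediction = model(tensor)
          print(output_fn(prediction))  # JSON string of raw scores per class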