github/tensorflow/models/tutorials/image/cifar10/cifar10_input.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""解码CIFAR-10二进制文件"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from six.moves import xrange
import tensorflow as tf

# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24

# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000

def read_cifar10(filename_queue):
  """从CIFAR10数据文件中阅读并解析example。
  推荐:如果你想要N路并行阅读,那么调用这个函数N次。这样会返回N个独立的Reader用来阅读那些文件里不同的文件和位置,这样会返回更好的混合example。
  参数:
    filename_queue: 文件名字符串队列。
  返回:
    一个object代表一个example,包括以下内容:
      高: result的行数(32)
      宽: result的列数(32)
      深: result的色彩通道数(3)
      key: 一个标量字符串描述这个example的文件名和record number。
      标签: 一个带有标签(0..9)的int32 Tensor
      uint8image: 一个带有图像数据的[height, width, depth] uint8 Tensor
  """

  class CIFAR10Record(object):
    pass
  result = CIFAR10Record()

  # Dimensions of the images in the CIFAR-10 dataset.
  label_bytes = 1
  result.height = 32
  result.width = 32
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  # Every record consists of a label followed by the image, with a
  # fixed number of bytes for each.
  record_bytes = label_bytes + image_bytes
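  # For CIFAR-10 this works out to 1 + 32 * 32 * 3 = 3073 bytes per record.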

  # Read a record, getting filenames from the filename_queue. There is no
  # header or footer in the CIFAR-10 format, so we leave header_bytes
  # and footer_bytes at their default of 0.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Convert from a string to a vector of uint8 that is record_bytes long.
  record_bytes = tf.decode_raw(value, tf.uint8)

  # The first byte represents the label, which we convert from uint8 to int32.
  result.label = tf.cast(
      tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

  # The remaining bytes after the label represent the image, which we
  # reshape from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(
      tf.strided_slice(record_bytes, [label_bytes],
                       [label_bytes + image_bytes]),
      [result.depth, result.height, result.width])
  # Convert from [depth, height, width] to [height, width, depth].
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
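
# A minimal usage sketch (the file name below is hypothetical; any of the
# CIFAR-10 binary batch files works):
#
#   queue = tf.train.string_input_producer(['data_batch_1.bin'])
#   record = read_cifar10(queue)
#   # record.uint8image: [32, 32, 3] uint8 Tensor; record.label: [1] int32 Tensor.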

def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """生成一个图像和标签batch队列。
  Args:
    image: float32类型的[height, width, 3]Tensor
    label: int32类型的Tensor
    min_queue_examples: int32,保留在队列中的samples的最小数量,用来提供example batch。
    batch_size: 每个batch的图像数量。
    shuffle: boolean 表示是否打乱队列。
  Returns:
    images: Images. [batch_size, height, width, 3] tensor
    labels: Labels. [batch_size] tensor
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  num_preprocess_threads = 16
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])

def distorted_inputs(data_dir, batch_size):
  """Construct distorted input for CIFAR training using the Reader ops.
  Args:
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  with tf.name_scope('data_augmentation'):
    # Read examples from files in the filename queue.
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for training the network. Note the many random
    # distortions applied to the image.

    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])

    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)

    # Because these operations are not commutative, consider randomizing
    # the order of their operation.
    # NOTE: since per_image_standardization zeros the mean and makes
    # the stddev unit, this likely has no effect see tensorflow#1458.
    distorted_image = tf.image.random_brightness(distorted_image,
                                                 max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(distorted_image)

    # Set the shapes of tensors.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    print('Filling queue with %d CIFAR images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=True)

def inputs(eval_data, data_dir, batch_size):
  """Construct input for CIFAR evaluation using the Reader ops.
  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  if not eval_data:
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(data_dir, 'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  with tf.name_scope('input'):
    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for evaluation.
    # Crop the central [height, width] of the image.
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                           height, width)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(resized_image)

    # Set the shapes of tensors.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=False)
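
The functions above only build the input pipeline graph; the filename and
example queues they create must be driven by queue runners before any batch
can be fetched. Below is a minimal driver sketch, assuming TensorFlow 1.x and
that the CIFAR-10 binaries have been extracted to
/tmp/cifar10_data/cifar-10-batches-bin (adjust data_dir to your own path):

import tensorflow as tf
import cifar10_input

images, labels = cifar10_input.distorted_inputs(
    data_dir='/tmp/cifar10_data/cifar-10-batches-bin', batch_size=128)

with tf.Session() as sess:
  # Start the threads that fill the filename and example queues.
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  try:
    image_batch, label_batch = sess.run([images, labels])
    print(image_batch.shape)  # (128, 24, 24, 3)
    print(label_batch.shape)  # (128,)
  finally:
    coord.request_stop()
    coord.join(threads)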

Original post: https://www.cnblogs.com/estellellll/p/10551157.html
