TensorFlow Sample Code Analysis: cifar10

Git repository: https://github.com/tensorflow/models.git

"""Routine for decoding the CIFAR-10 binary file format."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

# Pixel size of the processed images; the original CIFAR images are 32 x 32.
# The upstream code crops them down to 24 x 24, but IMAGE_SIZE is kept at 32
# here, matching the native CIFAR image size. If one alters this number, then
# the entire model architecture will change and any model would need to be
# retrained.
#IMAGE_SIZE = 24
IMAGE_SIZE = 32
# Global constants describing the CIFAR-10 data set.
# Number of classes.
NUM_CLASSES = 10
# Size of the training set.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
# Size of the evaluation set.
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000

# Reads one example from the CIFAR-10 data files.
# filename_queue: a queue of file names.
def read_cifar10(filename_queue):
  """
  Reads and parses examples from CIFAR10 data files.

  Recommendation: if you want N-way read parallelism, call this function
  N times.  This will give you N independent Readers reading different
  files & positions within those files, which will give better mixing of
  examples.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (32)
      width: number of columns in the result (32)
      depth: number of color channels in the result (3)
      key: a scalar string Tensor describing the filename & record number
        for this example.
      label: an int32 Tensor with the label in the range 0..9.
      uint8image: a [height, width, depth] uint8 Tensor with the image data
  """

  class CIFAR10Record(object):
    pass
  result = CIFAR10Record()

  # Dimensions of the images in the CIFAR-10 dataset.
  # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
  # input format.
  # Length of the label in bytes; for CIFAR-100 it is 2.
  label_bytes = 1  # 2 for CIFAR-100
  result.height = 32
  result.width = 32
  # 3 color channels (R, G, B), one byte each (0-255).
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  # Every record consists of a label followed by the image, with a
  # fixed number of bytes for each.
  # Total length of one record = label length + image length.
  record_bytes = label_bytes + image_bytes

  # Read a record, getting filenames from the filename_queue.  No
  # header or footer in the CIFAR-10 format, so we leave header_bytes
  # and footer_bytes at their default of 0.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Convert from a string to a vector of uint8 that is record_bytes long.
  record_bytes = tf.decode_raw(value, tf.uint8)

  # The first byte is the label (the image's class), which we convert
  # from uint8 to int32.
  result.label = tf.cast(
      tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

  # The remaining bytes after the label represent the image, which we reshape
  # from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(
      tf.strided_slice(record_bytes, [label_bytes],
                       [label_bytes + image_bytes]),
      [result.depth, result.height, result.width])
  # Convert the layout from [depth, height, width] to [height, width, depth].
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
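
For reference, the same record layout can be decoded outside the TensorFlow graph with plain NumPy. The sketch below is only illustrative (the file path is a hypothetical example); each record is one label byte followed by 3072 image bytes stored plane by plane (1024 red, then 1024 green, then 1024 blue):

import numpy as np

record_len = 1 + 32 * 32 * 3  # label byte + image bytes
raw = np.fromfile('data_batch_1.bin', dtype=np.uint8, count=record_len)
label = int(raw[0])                                    # class id in 0..9
image = raw[1:].reshape(3, 32, 32).transpose(1, 2, 0)  # [depth, h, w] -> [h, w, depth]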

# Build a queued batch of images and labels.
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type float32.
    label: 1-D Tensor of type int32.
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then
  # read 'batch_size' images + labels from the example queue.
  # Number of preprocessing threads.
  num_preprocess_threads = 8
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)

  # Display the training images in the visualizer.
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
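
As a worked example of the queue sizing (batch_size = 128 is only an assumed value), the training-set numbers come out as follows:

min_queue_examples = int(50000 * 0.4)    # 20000
capacity = min_queue_examples + 3 * 128  # 20384 slots in the shuffle queue
# shuffle_batch never lets the queue fall below min_after_dequeue = 20000,
# which is what keeps consecutive shuffled batches well mixed.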

# Build distorted (augmented) input for CIFAR training.
def distorted_inputs(data_dir, batch_size):
  """Construct distorted input for CIFAR training using the Reader ops.

  Args:
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)
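  # Note: string_input_producer registers a QueueRunner in the graph; the
  # file-name queue is only filled once tf.train.start_queue_runners is
  # called (see the usage sketch at the end of this article).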

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for training the network. Note the many random
  # distortions applied to the image.
  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)

  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
  # Randomly adjust the brightness.
  distorted_image = tf.image.random_brightness(distorted_image,
                                               max_delta=63)
  # Randomly adjust the contrast.
  distorted_image = tf.image.random_contrast(distorted_image,
                                             lower=0.2, upper=1.8)

  # Subtract off the mean and divide by the variance of the pixels.
  # Linearly scales the image to have zero mean and unit variance.
  float_image = tf.image.per_image_standardization(distorted_image)
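  # Note: per_image_standardization computes (x - mean) / adjusted_stddev,
  # where adjusted_stddev = max(stddev, 1 / sqrt(number of elements in the
  # image)), so a constant image does not cause a division by zero.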

  # Set the shapes of tensors.
  float_image.set_shape([height, width, 3])
  read_input.label.set_shape([1])

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print('Filling queue with %d CIFAR images before starting to train. '
        'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=True)

# Build input for CIFAR evaluation.
# eval_data: whether to use the training or the evaluation data set.
# data_dir: path to the CIFAR-10 data directory.
# batch_size: number of images per batch.
def inputs(eval_data, data_dir, batch_size):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  if not eval_data:
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(data_dir, 'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for evaluation.
  # Crop the central [height, width] of the image.
  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                         height, width)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_standardization(resized_image)

  # Set the shapes of tensors.
  float_image.set_shape([height, width, 3])
  read_input.label.set_shape([1])

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=False)
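
A minimal usage sketch, assuming a hypothetical data_dir and a batch size of 128 (the equivalent wiring lives in cifar10_train.py and cifar10_eval.py in the upstream repository): with these queue-based readers, no data flows until the queue runners are started inside a session.

images, labels = distorted_inputs(data_dir='/tmp/cifar10_data/cifar-10-batches-bin',
                                  batch_size=128)

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  try:
    image_batch, label_batch = sess.run([images, labels])
    print(image_batch.shape, label_batch.shape)  # (128, 32, 32, 3) (128,)
  finally:
    coord.request_stop()
    coord.join(threads)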
