How to check the TensorFlow version, and how to combine models with TFLearn

Checking the version

import tensorflow as tf

print(tf.__version__)
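
The same check can be run from a shell without opening an interpreter (a one-liner sketch, assuming the python on your PATH is the one TensorFlow is installed into):

python -c "import tensorflow as tf; print(tf.__version__)"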

Combining models

The demo below trains two MNIST models separately, saves their weights, and then loads both sets of weights into a single combined ("mashup") network:

'''
Demonstrate that weights saved with models in one scope can be loaded
into models being used in a different scope.

This allows multiple models to be run, and combined models to load
weights from separately trained models.
'''

from __future__ import division, print_function, absolute_import

import tflearn
import tensorflow as tf
import tflearn.datasets.mnist as mnist

from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression

#-----------------------------------------------------------------------------

class Model1(object):
    '''
    Convnet for MNIST.
    '''
    def __init__(self):
        network = tflearn.input_data(shape=[None, 784], name="input")
        network = self.make_core_network(network)
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        model = tflearn.DNN(network, tensorboard_verbose=0)
        self.model = model
   
    @staticmethod
    def make_core_network(network):
        network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 10, activation='softmax')
        return network
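
    # Because make_core_network is a @staticmethod that takes its input tensor
    # as an argument, the identical layer stack can be rebuilt inside any
    # variable scope; Model12 below relies on this to embed the convnet in
    # its combined graph.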
   
    def train(self, X, Y, testX, testY, n_epoch=1, snapshot_step=1000):
        # Training
        self.model.fit({'input': X}, {'target': Y}, n_epoch=n_epoch,
                       validation_set=({'input': testX}, {'target': testY}),
                       snapshot_step=snapshot_step,
                       show_metric=True, run_id='convnet_mnist')
   
class Model2(object):
    '''
    Fully-connected DNN for MNIST.
    '''
    def __init__(self):
        # Building deep neural network
        network = tflearn.input_data(shape=[None, 784], name="input")
        network = self.make_core_network(network)

        # Regression using SGD with learning rate decay and Top-3 accuracy
        sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
        top_k = tflearn.metrics.Top_k(3)

        network = tflearn.regression(network, optimizer=sgd, metric=top_k,
                                     loss='categorical_crossentropy', name="target")
        model = tflearn.DNN(network, tensorboard_verbose=0)
        self.model = model

    @staticmethod
    def make_core_network(network):
        dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001,
                                         name="dense1")
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001,
                                         name="dense2")
        dropout2 = tflearn.dropout(dense2, 0.8)
        softmax = tflearn.fully_connected(dropout2, 10, activation='softmax',
                                          name="softmax")
        return softmax

    def train(self, X, Y, testX, testY, n_epoch=1, snapshot_step=1000):
        # Training
        self.model.fit(X, Y, n_epoch=n_epoch, validation_set=(testX, testY),
                       snapshot_step=snapshot_step,
                       show_metric=True, run_id="dense_model")
   
class Model12(object):
    '''
    Combination of the two networks: each core is built in its own variable
    scope, and their 10-way outputs are concatenated and fed to a final
    softmax layer.
    '''
    def __init__(self):
        inputs = tflearn.input_data(shape=[None, 784], name="input")

        with tf.variable_scope("scope1"):
            net_conv = Model1.make_core_network(inputs)  # shape (?, 10)
        with tf.variable_scope("scope2"):
            net_dnn = Model2.make_core_network(inputs)   # shape (?, 10)

        network = tf.concat([net_conv, net_dnn], 1, name="concat")  # shape (?, 20)
        network = tflearn.fully_connected(network, 10, activation="softmax")
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        self.model = tflearn.DNN(network, tensorboard_verbose=0)

    def load_from_two(self, m1fn, m2fn):
        self.model.load(m1fn, scope_for_restore="scope1", weights_only=True)
        self.model.load(m2fn, scope_for_restore="scope2", weights_only=True,
                        create_new_session=False)
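        # scope_for_restore maps the unscoped variable names in each weights
        # file onto the scoped variables of this graph; create_new_session=False
        # on the second load reuses the session from the first load, so the
        # weights already restored into scope1 are kept.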
   
    def train(self, X, Y, testX, testY, n_epoch=1, snapshot_step=1000):
        # Training
        self.model.fit(X, Y, n_epoch=n_epoch, validation_set=(testX, testY),
                       snapshot_step=snapshot_step,
                       show_metric=True, run_id="model12")

#-----------------------------------------------------------------------------
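# The load_data call below downloads MNIST into a local "mnist/" directory
# the first time it is run.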
   
X, Y, testX, testY = mnist.load_data(one_hot=True)
   
def prepare_model1_weights_file():
    tf.reset_default_graph()
    m1 = Model1()
    m1.train(X, Y, testX, testY, 2)
    m1.model.save("model1.tfl")

def prepare_model1_weights_file_in_scopeQ():
    tf.reset_default_graph()
    with tf.variable_scope("scopeQ"):
        m1 = Model1()
    m1.model.fit({"scopeQ/input": X}, {"scopeQ/target": Y}, n_epoch=1,
                 validation_set=0.1, show_metric=True, run_id="model1_scopeQ")
    m1.model.save("model1_scopeQ.tfl")

def prepare_model2_weights_file():
    tf.reset_default_graph()
    m2 = Model2()
    m2.train(X, Y, testX, testY, 1)
    m2.model.save("model2.tfl")
   
def demonstrate_loading_weights_into_different_scope():
    print("=" * 60 + " Demonstrate loading weights saved in scopeQ, into variables now in scopeA")
    tf.reset_default_graph()
    with tf.variable_scope("scopeA"):
        m1a = Model1()
    print("=" * 60 + " Trying to load model1 weights from scopeQ into scopeA")
    m1a.model.load("model1_scopeQ.tfl", variable_name_map=("scopeA", "scopeQ"), verbose=True)

def demonstrate_loading_weights_into_different_scope_using_custom_function():
    print("=" * 60 + " Demonstrate loading weights saved in scopeQ, into variables now in scopeA, using custom map function")
    tf.reset_default_graph()
    def vname_map(ename):
        # Variables now live in scopeA but were saved under scopeQ, so map
        # each in-graph name to the name it has in the weights file.
        name_in_file = ename.replace("scopeA", "scopeQ")
        print("%s -> %s" % (ename, name_in_file))
        return name_in_file
    with tf.variable_scope("scopeA"):
        m1a = Model1()
    print("=" * 60 + " Trying to load model1 weights from scopeQ into scopeA")
    m1a.model.load("model1_scopeQ.tfl", variable_name_map=vname_map, verbose=True)
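# As the two functions above illustrate, variable_name_map may be given either
# as a (graph_prefix, file_prefix) pair or as a callable that maps each
# in-graph variable name to the corresponding name in the weights file.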
   
def demonstrate_loading_two_instances_of_model1():
    print("=" * 60 + " Demonstrate loading weights from model1 into two instances of model1 in scopeA and scopeB")
    tf.reset_default_graph()
    with tf.variable_scope("scopeA"):
        m1a = Model1()
        print("-" * 40 + " Trying to load model1 weights: should fail")
        try:
            m1a.model.load("model1.tfl", weights_only=True)
        except Exception as err:
            print("Loading failed, with error as expected, because variables are in scopeA")
            print("error: %s" % str(err))
            print("-" * 40)

        print("=" * 60 + " Trying to load model1 weights: should succeed")
        m1a.model.load("model1.tfl", scope_for_restore="scopeA", verbose=True, weights_only=True)

    with tf.variable_scope("scopeB"):
        m1b = Model1()
        m1b.model.load("model1.tfl", scope_for_restore="scopeB", verbose=True, weights_only=True)
    print("=" * 60 + " Successfully restored weights to two instances of model1, in different scopes")
   
def demonstrate_combined_model1_and_model2_network():
    print("=" * 60 + " Demonstrate loading weights from model1 and model2 into new mashup network model12")
    print("-" * 40 + " Creating mashup of model1 and model2 networks")
    tf.reset_default_graph()
    m12 = Model12()
    print("-" * 60 + " Loading model1 and model2 weights into mashup")
    m12.load_from_two("model1.tfl", "model2.tfl")
    print("-" * 60 + " Training mashup")
    m12.train(X, Y, testX, testY, 1)
    print("-" * 60 + " Saving mashup weights")
    m12.model.save("model12.tfl")
    print("-" * 60 + " Done")

print("=" * 77)
prepare_model1_weights_file()
prepare_model2_weights_file()
prepare_model1_weights_file_in_scopeQ()
print("-" * 77)
print("-" * 77)

demonstrate_loading_weights_into_different_scope()
demonstrate_loading_weights_into_different_scope_using_custom_function()
demonstrate_loading_two_instances_of_model1()
demonstrate_combined_model1_and_model2_network()

print("=" * 77)
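
Once the mashup has been trained and saved, it can be reloaded and used like any other tflearn DNN. A minimal sketch, assuming it is appended to the script above so that Model12, testX, and testY are still in scope; the accuracy computation is illustrative and not part of the original demo:

import numpy as np

tf.reset_default_graph()
m12 = Model12()
m12.model.load("model12.tfl")

probs = m12.model.predict(testX)   # per-sample class probabilities, shape (n, 10)
pred = np.argmax(probs, axis=1)    # predicted digit per sample
true = np.argmax(testY, axis=1)    # one-hot labels back to digits
print("mashup test accuracy: %.4f" % (pred == true).mean())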