# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import time
# Build the dataset and fix the dtypes

np.random.seed(0)
train_X, train_y = datasets.make_moons(300, noise=0.20)
train_X = train_X.astype(np.float32)
train_y = train_y.astype(np.int32)
num_example=len(train_X)

# Network size parameters
nn_input_dim=2 # number of input units
nn_output_dim=2 # number of output units
nn_hdim=100 # hidden layer size
# Gradient descent parameters
epsilon=0.01 # learning rate
reg_lambda=0.01 # regularization strength

# Shared variables: the model parameters

w1=theano.shared(np.random.randn(nn_input_dim,nn_hdim),name="W1")
b1=theano.shared(np.zeros(nn_hdim),name="b1")
w2=theano.shared(np.random.randn(nn_hdim,nn_output_dim),name="W2")
b2=theano.shared(np.zeros(nn_output_dim),name="b2")

# Forward pass
X=T.matrix('X')  # float64 matrix of inputs
y=T.lvector('y') # int64 vector of class labels
z1=X.dot(w1)+b1
a1=T.tanh(z1)
z2=a1.dot(w2)+b2
y_hat=T.nnet.softmax(z2)
# L2 regularization term
loss_reg=1./num_example * reg_lambda/2 * (T.sum(T.square(w1))+T.sum(T.square(w2)))
loss=T.nnet.categorical_crossentropy(y_hat,y).mean()+loss_reg
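
# For intuition, the same objective written in plain NumPy (a minimal sketch;
# `p` is the softmax output and `labels` the integer class indices -- these
# names are illustrative, not part of the original script):
def numpy_loss(p, labels, W1, W2, lam, n):
    # mean negative log-probability of the true class
    cross_entropy = -np.log(p[np.arange(len(labels)), labels]).mean()
    # L2 penalty on both weight matrices, matching loss_reg above
    l2 = 1. / n * lam / 2 * (np.sum(W1 ** 2) + np.sum(W2 ** 2))
    return cross_entropy + l2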
# Predicted class: argmax over the softmax output
prediction=T.argmax(y_hat,axis=1)

forward_prop=theano.function([X],y_hat)
calculate_loss=theano.function([X,y],loss)
predict=theano.function([X],prediction)
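
# theano.function compiles the symbolic graph into callable functions; e.g.
# forward_prop(train_X[:5]) would return a (5, 2) array of class probabilities.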

# Gradients of the loss w.r.t. each parameter
dw2=T.grad(loss,w2)
db2=T.grad(loss,b2)
dw1=T.grad(loss,w1)
db1=T.grad(loss,b1)

# Parameter updates: one step of batch gradient descent
gradient_step=theano.function(
    [X,y],
    updates=(
        (w2,w2-epsilon*dw2),
        (b2,b2-epsilon*db2),
        (w1,w1-epsilon*dw1),
        (b1,b1-epsilon*db1)
    )
)
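
# Illustrative sanity check (not in the original): a single full-batch update
# should normally lower the training loss.
# loss_before = calculate_loss(train_X, train_y)
# gradient_step(train_X, train_y)
# assert calculate_loss(train_X, train_y) < loss_before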

def build_model(num_passes=20000,print_loss=False):

    w1.set_value(np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim))
    b1.set_value(np.zeros(nn_hdim))
    w2.set_value(np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim))
    b2.set_value(np.zeros(nn_output_dim))

    for i in range(0, num_passes):
        gradient_step(train_X, train_y)
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(train_X, train_y)))
def accuracy_rate():
    predict_result = predict(train_X)
    count = int(np.sum(predict_result == train_y))
    print("the correct rate is: %f" % (float(count) / len(predict_result)))

def plot_decision_boundary(pred_func):
    # Set min and max values and give it some padding
    x_min, x_max = train_X[:, 0].min() - .5, train_X[:, 0].max() + .5
    y_min, y_max = train_X[:, 1].min() - .5, train_X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_y, cmap=plt.cm.Spectral)
    plt.show()

build_model(print_loss=True)
accuracy_rate()
# plot_decision_boundary(lambda x: predict(x))
# plt.title("Decision Boundary for hidden layer size 3")


# GPU version: the same network, but the dataset and parameters are stored as
# float32 shared variables so every gradient step can run entirely on the GPU.
# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import time
# Build a larger dataset; labels are one-hot encoded this time

np.random.seed(0)
train_X, train_y = datasets.make_moons(5000, noise=0.20)
train_y_onehot = np.eye(2)[train_y]
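# np.eye(2)[train_y] indexes the rows of a 2x2 identity matrix with the label
# array, turning class indices into one-hot rows, e.g.
# np.eye(2)[np.array([0, 1, 1])] -> [[1., 0.], [0., 1.], [0., 1.]]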

# Network size parameters
num_example=len(train_X)
nn_input_dim=2 # number of input units
nn_output_dim=2 # number of output units
nn_hdim=1000 # hidden layer size
# Gradient descent parameters
epsilon=np.float32(0.01) # learning rate
reg_lambda=np.float32(0.01) # regularization strength

# Shared variables
# GPU NOTE: everything is cast to float32 so it can be stored on the GPU!
X = theano.shared(train_X.astype('float32'))          # data is initialized on the GPU
y = theano.shared(train_y_onehot.astype('float32'))
w1 = theano.shared(np.random.randn(nn_input_dim, nn_hdim).astype('float32'), name='W1')
b1 = theano.shared(np.zeros(nn_hdim).astype('float32'), name='b1')
w2 = theano.shared(np.random.randn(nn_hdim, nn_output_dim).astype('float32'), name='W2')
b2 = theano.shared(np.zeros(nn_output_dim).astype('float32'), name='b2')
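
# To actually run on a GPU, old-style Theano is typically launched with
# THEANO_FLAGS='device=gpu,floatX=float32' (or device=cuda with the newer
# libgpuarray backend); the float32 casts above match that floatX setting.
print(theano.config.device, theano.config.floatX)  # confirm active device/dtype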

# Forward pass
z1=X.dot(w1)+b1
a1=T.tanh(z1)
z2=a1.dot(w2)+b2
y_hat=T.nnet.softmax(z2)
# L2 regularization term
loss_reg=1./num_example * reg_lambda/2 * (T.sum(T.square(w1))+T.sum(T.square(w2)))
loss=T.nnet.categorical_crossentropy(y_hat,y).mean()+loss_reg
# Predicted class: argmax over the softmax output
prediction=T.argmax(y_hat,axis=1)

forward_prop=theano.function([],y_hat)
calculate_loss=theano.function([],loss)
predict=theano.function([],prediction)

# Gradients of the loss w.r.t. each parameter
dw2=T.grad(loss,w2)
db2=T.grad(loss,b2)
dw1=T.grad(loss,w1)
db1=T.grad(loss,b1)

# Parameter updates: one step of batch gradient descent (no inputs needed)
gradient_step=theano.function(
    [],
    updates=(
        (w2,w2-epsilon*dw2),
        (b2,b2-epsilon*db2),
        (w1,w1-epsilon*dw1),
        (b1,b1-epsilon*db1)
    )
)
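
# Because gradient_step takes no inputs, no data crosses the host/GPU boundary
# during training: X, y, and all the parameters already live in shared
# variables, which is where this version gets its speedup.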

def build_model(num_passes=20000, print_loss=False):
    # Re-initialize the parameters (weights scaled by fan-in)
    w1.set_value((np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)).astype('float32'))
    b1.set_value(np.zeros(nn_hdim).astype('float32'))
    w2.set_value((np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)).astype('float32'))
    b2.set_value(np.zeros(nn_output_dim).astype('float32'))

    for i in range(0, num_passes):
        start = time.time()
        gradient_step()
        end = time.time()
        # print("time per step: %f" % (end - start))  # uncomment to time each step
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss()))

def accuracy_rate():
    predict_result = predict()
    count = int(np.sum(predict_result == train_y))
    print("count: %d" % count)
    print("the correct rate is: %f" % (float(count) / len(predict_result)))

def plot_decision_boundary(pred_func):
    # Set min and max values and give it some padding
    x_min, x_max = train_X[:, 0].min() - .5, train_X[:, 0].max() + .5
    y_min, y_max = train_X[:, 1].min() - .5, train_X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_y, cmap=plt.cm.Spectral)
    plt.show()

build_model(print_loss=True)
accuracy_rate()

# NOTE: predict() in this version takes no inputs (it reads the shared X), so
# plotting the boundary would need a predict function that accepts new data.
# plot_decision_boundary(lambda x: predict(x))
# plt.title("Decision Boundary for hidden layer size 3")
一.实验目的 掌握基于覆盖理论与基本路径的基本白盒测试方法和实践 二.实验要求 运用逻辑覆盖测试的覆盖准则设计被测程序的测试用例,并运行测试用例检查程序的正确与否,给出程序缺陷小结. 三.实验内容 根据各位同学自己的被测程序,分别作出各类白盒测试技术的用例设计和相应的Junit脚本. 所有的覆盖的技术:语句覆盖.判定覆盖.条件覆盖.判定/条件覆盖.组合覆盖.路径覆盖,基本路径测试方法. 1) 被测原代码 package Test1; import java.io.IOException; imp