#!/usr/bin/python3
# -*- coding: utf-8 -*-
#

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import sys
import time
import specrep

t = time.localtime()
print('executed at {:4}/{:02}/{:02} {:02}:{:02}:{:02}'.format(
    t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec))
print('python version:', sys.version)
print('numpy version:', np.__version__)
print('tensorflow version:', tf.__version__)
if not tf.__version__.startswith('1.'):
    print('This code requires version 1.*.')
    exit()

print('\nPrepare the training dataset.')
train_t = np.array([5.2, 5.7, 8.6, 14.9, 18.2, 20.4,
                    25.5, 26.4, 22.8, 17.5, 11.1, 6.6])
train_t = train_t.reshape([12, 1])
train_x = np.array([[mon**n for n in range(0, 5)] for mon in range(1, 13)])
print('train_x =', train_x)
print('train_t =', train_t)
# Regard each row of x as 5-dimensional input data, giving 12 samples of (x, t).

print('\nDefine a placeholder to hold the training dataset.')
x = tf.compat.v1.placeholder(tf.float32, [None, 5])
print('x =', x)

print('\nDefine variables for weights.')
w = tf.Variable(tf.zeros([5, 1]))
print('w =', w)

print('\nDefine a polynomial to predict label values.')
y = tf.matmul(x, w)
print('y =', y)
print('y.op.inputs._inputs[0] is x:', y.op.inputs._inputs[0] is x)
print('y.op.inputs._inputs[1] is w:', y.op.inputs._inputs[1] is w)
print('y.op.inputs._inputs[1].op.inputs[0] is w:',
      y.op.inputs._inputs[1].op.inputs[0] is w)
# No matrix multiplication is executed here; the expression is merely
# registered in the computation graph.

print('\nDefine a placeholder to hold the label values.')
t = tf.compat.v1.placeholder(tf.float32, [None, 1])
print('t =', t)

print('\nDefine a loss function as the squared error.')
loss = tf.reduce_sum(tf.square(y - t))
print('loss =', loss)

print('\nDefine an optimization step using the Adam optimizer.')
train_step = tf.compat.v1.train.AdamOptimizer().minimize(loss)
print('train_step =', train_step)

print('\nCreate a session and initialize variables.')
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
# result = sess.run(op, feed_dict=...): what gets computed depends on the
# op that is passed in, i.e. execution is context-dependent.

print('\nApply the optimization step for 100,000 epochs.')
time_start = time.perf_counter()
i = 0
for _ in range(100000):
    i += 1
    sess.run(train_step, feed_dict={x: train_x, t: train_t})
    if i % 10000 == 0:
        loss_val = sess.run(loss, feed_dict={x: train_x, t: train_t})
        print('Step: {}, Loss: {}'.format(i, loss_val))
time_end = time.perf_counter()
print('time elapsed =', time_end - time_start)

print('\nShow weights after the training.')
ws = sess.run(w)
print('ws =', ws)

print('\nDefine a function to predict values using the final weights.')
def predict(x):
    # Evaluate the degree-4 polynomial sum(ws[n] * x**n) with the trained weights.
    pred = sum([ws[n][0] * x**n for n in range(0, 5)])
    return pred

print('\nPlot a chart for predictions.')
fig = plt.figure()
subplot = fig.add_subplot(1, 1, 1)
subplot.set_xlim(1, 12)
subplot.set_xticks(range(1, 13))
subplot.scatter(range(1, 13), train_t)
xs = np.linspace(1, 12, 100)
ys = predict(xs)
subplot.plot(xs, ys)
plt.savefig('ex1-1.png')
# plt.show()

print('\nLog of computer spec.')
specrep.report()
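
# --- Optional sanity check (an added sketch, not part of the original script) ---
# The 100,000 Adam steps above approximate the least-squares fit of the
# degree-4 polynomial to the 12 data points. Since train_x is the design
# matrix, the closed-form solution can be computed directly with NumPy;
# its weights should roughly match ws after training has converged.
w_ls, _, _, _ = np.linalg.lstsq(train_x, train_t, rcond=None)
print('\nClosed-form least-squares weights for comparison:')
print('w_ls =', w_ls)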