Straight to the code:
```python
fig_loss = np.zeros([n_epoch])   # per-epoch training loss
fig_acc1 = np.zeros([n_epoch])   # per-epoch training accuracy
fig_acc2 = np.zeros([n_epoch])   # per-epoch validation accuracy

for epoch in range(n_epoch):
    start_time = time.time()

    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc],
                              feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
        # write a TensorBoard summary for this batch, recorded at the epoch index
        summary_str = sess.run(merged_summary_op,
                               feed_dict={x: x_train_a, y_: y_train_a})
        summary_writer.add_summary(summary_str, epoch)
    print("   train loss: %f" % (np.sum(train_loss) / n_batch))
    print("   train acc: %f" % (np.sum(train_acc) / n_batch))
    fig_loss[epoch] = np.sum(train_loss) / n_batch
    fig_acc1[epoch] = np.sum(train_acc) / n_batch

    # validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (np.sum(val_loss) / n_batch))
    print("   validation acc: %f" % (np.sum(val_acc) / n_batch))
    fig_acc2[epoch] = np.sum(val_acc) / n_batch

# training-loss curve
fig, ax1 = plt.subplots()
lns1 = ax1.plot(np.arange(n_epoch), fig_loss, label="Loss")
ax1.set_xlabel('iteration')
ax1.set_ylabel('training loss')

# training and validation accuracy curves on one figure
fig2, ax2 = plt.subplots()
ax3 = ax2.twinx()  # ax3 shares the x-axis of ax2
lns2 = ax2.plot(np.arange(n_epoch), fig_acc1, label="train acc")
lns3 = ax3.plot(np.arange(n_epoch), fig_acc2, label="val acc")
ax2.set_xlabel('iteration')
ax2.set_ylabel('training acc')
ax3.set_ylabel('val acc')

# merge the two legends into one
lns = lns2 + lns3
labels = ["train acc", "val acc"]
plt.legend(lns, labels, loc=7)
plt.show()
```
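The loop above assumes that the graph, the metric tensors, the summary ops and the `minibatches()` helper were already defined earlier in the script. For completeness, here is a minimal sketch of what that setup could look like; the placeholder shapes, the one-layer stand-in model, the hyperparameter values and the log directory are assumptions for illustration only, not the original author's network:

```python
import time
import numpy as np
import tensorflow as tf          # TensorFlow 1.x graph/session API
import matplotlib.pyplot as plt

n_epoch, batch_size = 50, 64     # assumed hyperparameters

x  = tf.placeholder(tf.float32, shape=[None, 784], name='x')   # assumed input shape
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')

logits = tf.layers.dense(x, 10)  # stand-in for the real network
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), y_), tf.float32))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

tf.summary.scalar('loss', loss)
tf.summary.scalar('acc', acc)
merged_summary_op = tf.summary.merge_all()

def minibatches(inputs, targets, batch_size, shuffle=False):
    # Yield (inputs, targets) mini-batches, optionally shuffled.
    idx = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(idx)
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        sel = idx[start:start + batch_size]
        yield inputs[sel], targets[sel]

sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./logs', sess.graph)
```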
Result: the training-loss curve and the combined train/validation accuracy curves produced by the code above.
Supplementary note: plotting the loss and accuracy in real time while training with tensorflow 2.x
Without further ado, here is the code:
```python
sgd = SGD(lr=float(model_value[3]), decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

# validation_split: a float between 0 and 1, the fraction of the training data
# held out as the validation set
history = model.fit(self.x_train, self.y_train,
                    batch_size=self.batch_size, epochs=self.epoch_size,
                    class_weight='auto', validation_split=0.1)

# plot training & validation accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()

# plot training & validation loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()

print("savemodel---------------")
model.save(os.path.join(model_value[0], 'model3_3.h5'))

# report loss and accuracy on the test set
score = model.evaluate(self.x_test, self.y_test, batch_size=self.batch_size)
```
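Note that the history-based plots above are only drawn after `fit()` returns. If you want the curves to refresh while training is still running, one common option (not from the original post) is a custom Keras callback that redraws the figure at the end of every epoch; the sketch below assumes matplotlib's interactive mode and the default tf.keras metric names 'accuracy'/'val_accuracy':

```python
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback

class LivePlot(Callback):
    """Redraw the loss and accuracy curves at the end of every epoch."""

    def on_train_begin(self, logs=None):
        self.hist = {'loss': [], 'val_loss': [], 'accuracy': [], 'val_accuracy': []}
        plt.ion()                                   # interactive mode: drawing does not block
        self.fig, (self.ax1, self.ax2) = plt.subplots(1, 2, figsize=(10, 4))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        for key in self.hist:
            self.hist[key].append(logs.get(key))
        self.ax1.cla(); self.ax2.cla()
        self.ax1.plot(self.hist['loss'], label='train loss')
        self.ax1.plot(self.hist['val_loss'], label='val loss')
        self.ax2.plot(self.hist['accuracy'], label='train acc')
        self.ax2.plot(self.hist['val_accuracy'], label='val acc')
        self.ax1.set_xlabel('Epoch'); self.ax2.set_xlabel('Epoch')
        self.ax1.legend(); self.ax2.legend()
        self.fig.canvas.draw()
        plt.pause(0.01)                             # let the GUI event loop refresh the window

# Hypothetical usage: pass the callback to fit() together with a validation split
# history = model.fit(x_train, y_train, validation_split=0.1,
#                     epochs=20, callbacks=[LivePlot()])
```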
That wraps up this example of using plt under tensorflow to draw the loss, accuracy and other curves used in papers. I hope it gives you a useful reference, and thank you for supporting 服务器之家.
Original article: https://blog.csdn.net/qq_40994943/article/details/86651941