Without further ado, let's go straight to the code~
# requires Keras 2.0.x with keras_contrib (for the CRF layer)
import sys

from keras.callbacks import ModelCheckpoint, Callback
from keras.layers import (Input, Embedding, Bidirectional, LSTM, Dropout,
                          ZeroPadding1D, Conv1D, TimeDistributed, Dense,
                          concatenate)
from keras.models import Model
from keras_contrib.layers import CRF
from visual_callbacks import AccLossPlotter

plotter = AccLossPlotter(graphs=['acc', 'loss'], save_graph=True,
                         save_graph_path=sys.path[0])

# record the loss after every batch
class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))

# max_len, char_value_dict, class_label_count, x_train and y_train are
# assumed to be prepared by the preprocessing code (not shown here)
word_input = Input(shape=(max_len,), dtype='int32', name='word_input')
word_emb = Embedding(len(char_value_dict) + 2, output_dim=64,
                     input_length=max_len, name='word_emb')(word_input)
# (the Keras 1-only dropout= argument on Embedding no longer exists in Keras 2)

# BiLSTM branch
bilstm = Bidirectional(LSTM(32, dropout=0.1, recurrent_dropout=0.1,
                            return_sequences=True))(word_emb)
bilstm_d = Dropout(0.1)(bilstm)

# CNN branch: zero-pad so the 'valid' convolution keeps the sequence length
half_window_size = 2
paddinglayer = ZeroPadding1D(padding=half_window_size)(word_emb)
conv = Conv1D(filters=50, kernel_size=2 * half_window_size + 1,
              padding='valid')(paddinglayer)
conv_d = Dropout(0.1)(conv)
dense_conv = TimeDistributed(Dense(50))(conv_d)

# concatenate the two branches and project to the label space
rnn_cnn_merge = concatenate([bilstm_d, dense_conv], axis=2)
dense = TimeDistributed(Dense(class_label_count))(rnn_cnn_merge)

# the CRF layer produces the final tag sequence
crf = CRF(class_label_count, sparse_target=False)
crf_output = crf(dense)

model = Model(inputs=[word_input], outputs=[crf_output])
model.compile(loss=crf.loss_function, optimizer='adam', metrics=[crf.accuracy])
model.summary()

# serialize the model architecture to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)

# save the weights whenever the validation loss reaches a new minimum
checkpointer = ModelCheckpoint(filepath="bilstm_1102_k205_tf130.w", verbose=0,
                               save_best_only=True, save_weights_only=True)

loss_history = LossHistory()
history = model.fit(x_train, y_train,
                    batch_size=32,
                    epochs=500,
                    callbacks=[checkpointer, loss_history, plotter],
                    verbose=1,
                    validation_split=0.1)
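Since the architecture is saved to model.json and the best weights to bilstm_1102_k205_tf130.w, here is a minimal sketch of loading them back for prediction (assuming the same keras_contrib version, and that x_test is padded to max_len like x_train):

from keras.models import model_from_json
from keras_contrib.layers import CRF

# rebuild the architecture; CRF is not a stock Keras layer,
# so it has to be supplied via custom_objects
with open("model.json") as json_file:
    model = model_from_json(json_file.read(), custom_objects={'CRF': CRF})

# restore the best weights saved by the ModelCheckpoint callback
model.load_weights("bilstm_1102_k205_tf130.w")

# tag scores per position; shape: (n_samples, max_len, class_label_count)
pred = model.predict(x_test)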
Bonus tip: fixing the error raised when reloading a Keras model trained with a custom CTC loss function
When training a model in Keras with the CTC loss, you need a custom loss function, defined like this:
self.ctc_model.compile(loss={'ctc': lambda y_true, output: output}, optimizer=opt)
Here loss is the custom function, passed as the dict {'ctc': lambda y_true, output: output}.
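For context, the reason this "loss function" can simply return the model output is that with CTC the loss is usually computed inside the graph itself. Below is a minimal sketch of that pattern; y_pred (the softmax output of the recognition network) and max_label_len are hypothetical names, not from the original post:

from keras import backend as K
from keras.layers import Input, Lambda

labels = Input(name='labels', shape=(max_label_len,), dtype='float32')
input_length = Input(name='input_length', shape=(1,), dtype='int64')
label_length = Input(name='label_length', shape=(1,), dtype='int64')

# K.ctc_batch_cost computes the per-sample CTC loss inside the graph,
# so the output of this layer already *is* the loss tensor
ctc_out = Lambda(lambda args: K.ctc_batch_cost(*args), output_shape=(1,),
                 name='ctc')([labels, y_pred, input_length, label_length])

# hence compile only needs a pass-through:
# loss={'ctc': lambda y_true, output: output}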
After training, the model needs to be reloaded:
from keras.models import load_model
model=load_model('final_ctc_model.h5')
This raises the error:
Unknown loss function : <lambda>
Because the loss is a custom function, load_model needs the custom_objects argument, with the dict {'<lambda>': lambda y_true, output: output}. The correct code is:
model=load_model('final_ctc_model.h5',custom_objects={'<lambda>': lambda y_true, output: output})
This is presumably because the custom loss function has to be registered with Keras when the model is deserialized.
It took many attempts to get this right: if the loss is defined as lambda y_true, output: output, the dict key can only be '<lambda>'; no other string works.
If you instead define a named function, say loss_func, as the loss:
self.ctc_model.compile(loss=loss_func, optimizer=opt)
then the model can be reloaded with:
model=load_model('final_ctc_model.h5',custom_objects={'loss_func': loss_func})
Note that in this case the custom_objects key must be identical to the function name.
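Putting the named-function variant together as a minimal sketch (ctc_model and opt stand for the model and optimizer from your own training script; the .h5 filename follows the snippets above):

from keras.models import load_model

def loss_func(y_true, output):
    # the model's 'ctc' output already is the loss, so just pass it through
    return output

# training time
ctc_model.compile(loss=loss_func, optimizer=opt)
ctc_model.save('final_ctc_model.h5')

# reloading: the custom_objects key must equal the function's name
model = load_model('final_ctc_model.h5',
                   custom_objects={'loss_func': loss_func})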
That's everything in this post on implementing BiLSTM+CNN+CRF sequence labeling (NER) with Keras. I hope it serves as a useful reference, and thanks for supporting 服务器之家.
Original article: https://blog.csdn.net/xinfeng2005/article/details/78485748