100字范文,内容丰富有趣,生活中的好帮手!
100字范文 > 阿里云天池大赛赛题(深度学习)——人工智能辅助构建知识图谱(完整代码)

阿里云天池大赛赛题(深度学习)——人工智能辅助构建知识图谱(完整代码)

时间:2023-01-19 10:07:11

相关推荐

阿里云天池大赛赛题(深度学习)——人工智能辅助构建知识图谱(完整代码)

# Imports: numpy for the embedding matrix, sklearn for the train/test split,
# gensim for char-level Word2Vec, and the project-local data utilities.
import numpy as np
from sklearn.model_selection import ShuffleSplit
from gensim.models import Word2Vec

from data_utils import ENTITIES, Documents, Dataset, SentenceExtractor, make_predictions
from data_utils import Evaluator

# Data location and entity-label <-> index mappings.
# Index 0 is reserved (labels start at 1), hence range(1, len+1).
data_dir = "./data/train"
ent2idx = dict(zip(ENTITIES, range(1, len(ENTITIES) + 1)))
idx2ent = {v: k for k, v in ent2idx.items()}

# Train/test split: hold out 20 documents as the test set.
# The scraped source had "random_state=)" (a syntax error); a fixed seed is
# restored here so the split is reproducible.
docs = Documents(data_dir=data_dir)
rs = ShuffleSplit(n_splits=1, test_size=20, random_state=2020)
train_doc_ids, test_doc_ids = next(rs.split(docs))
train_docs, test_docs = docs[train_doc_ids], docs[test_doc_ids]

# Hyper-parameters and dataset construction.
num_cates = max(ent2idx.values()) + 1  # label count incl. the reserved 0 class
sent_len = 64    # sliding-window size in characters
vocab_size = 3000  # initial vocabulary cap; replaced by the true size below
emb_size = 100   # character-embedding dimension
sent_pad = 10    # context padding added on each side of a window

sent_extractor = SentenceExtractor(window_size=sent_len, pad_size=sent_pad)
train_sents = sent_extractor(train_docs)
test_sents = sent_extractor(test_docs)

# Build the vocabulary on the training split only; the test set reuses it so
# unseen characters map consistently.
train_data = Dataset(train_sents, cate2idx=ent2idx)
train_data.build_vocab_dict(vocab_size=vocab_size)
test_data = Dataset(test_sents, word2idx=train_data.word2idx, cate2idx=ent2idx)
vocab_size = len(train_data.word2idx)  # actual size after building the dict

# Train a character-level Word2Vec over the whole corpus and build the
# embedding matrix aligned with train_data.word2idx.
w2v_train_sents = [list(doc.text) for doc in docs]
# NOTE(review): 'size' is the gensim<4 keyword; gensim>=4 renamed it
# 'vector_size' — confirm the installed version.
w2v_model = Word2Vec(w2v_train_sents, size=emb_size)

w2v_embeddings = np.zeros((vocab_size, emb_size))
for char, char_idx in train_data.word2idx.items():
    # Characters absent from the Word2Vec vocab keep their zero row.
    if char in w2v_model.wv:
        w2v_embeddings[char_idx] = w2v_model.wv[char]

# BiLSTM + CRF sequence-labeling model.
import keras
from keras.layers import Input, LSTM, Embedding, Bidirectional
from keras_contrib.layers import CRF
from keras.models import Model


def build_lstm_crf_model(num_cates, seq_len, vocab_size, model_opts=None):
    """Build and compile a BiLSTM-CRF tagger.

    Args:
        num_cates: number of output label categories (CRF states).
        seq_len: fixed input sequence length.
        vocab_size: size of the embedding vocabulary.
        model_opts: optional dict overriding emb_size / emb_trainable /
            emb_matrix / lstm_units / optimizer. (Was a mutable default
            ``dict()`` in the original — replaced with None to avoid the
            shared-mutable-default pitfall; callers are unaffected.)

    Returns:
        A compiled keras Model mapping (batch, seq_len) int32 token ids
        to per-position CRF tag predictions.
    """
    opts = {
        'emb_size': 256,
        'emb_trainable': True,
        'emb_matrix': None,
        'lstm_units': 256,
        'optimizer': keras.optimizers.Adam(),
    }
    opts.update(model_opts or {})

    input_seq = Input(shape=(seq_len,), dtype='int32')
    if opts.get('emb_matrix') is not None:
        # Initialize from a pre-trained embedding matrix, optionally frozen.
        embedding = Embedding(vocab_size, opts['emb_size'],
                              weights=[opts['emb_matrix']],
                              trainable=opts['emb_trainable'])
    else:
        embedding = Embedding(vocab_size, opts['emb_size'])
    x = embedding(input_seq)
    lstm = LSTM(opts['lstm_units'], return_sequences=True)
    x = Bidirectional(lstm)(x)
    crf = CRF(num_cates, sparse_target=True)
    output = crf(x)
    model = Model(input_seq, output)
    # The scraped source read "pile(...)": restored to model.compile(...).
    model.compile(opts['optimizer'], loss=crf.loss_function, metrics=[crf.accuracy])
    return model

# Instantiate the BiLSTM-CRF: input length = window plus padding on each side,
# with the frozen pre-trained Word2Vec embeddings.
seq_len = sent_len + 2 * sent_pad
model = build_lstm_crf_model(
    num_cates, seq_len=seq_len, vocab_size=vocab_size,
    model_opts={'emb_matrix': w2v_embeddings, 'emb_size': 100, 'emb_trainable': False})
model.summary()

# Materialize the training tensors and sanity-check their shapes.
train_X, train_y = train_data[:]
print('train_X.shape', train_X.shape)
print('train_y.shape', train_y.shape)

# Train the BiLSTM-CRF.
model.fit(train_X, train_y, batch_size=64, epochs=10)

# Predict on the held-out documents and decode tag ids back into
# document-level entity annotations.
test_X, _ = test_data[:]
preds = model.predict(test_X, batch_size=64, verbose=True)
pred_docs = make_predictions(preds, test_data, sent_pad, docs, idx2ent)

# Entity-level evaluation against the gold test documents.
f_score, precision, recall = Evaluator.f1_score(test_docs, pred_docs)
print('f_score: ', f_score)
print('precision: ', precision)
print('recall: ', recall)

# Display one ground-truth test document (bare expression: shown when run in
# a notebook cell).
sample_doc_id = list(pred_docs.keys())[3]
test_docs[sample_doc_id]

# Display the corresponding predicted document for visual comparison.
pred_docs[sample_doc_id]

以上代码全部来自于《阿里云天池大赛赛题解析(深度学习篇)》这本好书,十分推荐大家去阅读原书!

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。