Ginger Zeropeak 2021-05-18 19:34

How do I get this part of the model to run? What code do I need to write?

import os
import tensorflow as tf
import _pickle as pickle

# This is TF1-style graph code (placeholders + Session); on TensorFlow 2.x
# eager execution has to be disabled before any placeholder is created.
tf.compat.v1.disable_eager_execution()

# Hyper-parameters
users_num = 6040
movies_num = 3883
# user attributes
gender_type = 2
age_type = 7
occupy_type = 21
prefer_type = 18
bench_type = 2
# item attributes
genres_type = 18
# embeddings dimension
D = 48
D2 = int(D * D)
# L2 / lr decay
weight_decay = 0.1
learning_decay_steps = 10
learning_decay_rate = 1.0
# bn / dropout
is_dropout = False
is_bn = True

project_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
dataset = "ml-1m"
dataset_path = os.path.join("D:\\", "1RecSys", "DataSets", dataset)

def weight(tensor_shape, in_num, name=None):
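    # He-style initialization: stddev = sqrt(2 / fan_in)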
    return tf.Variable(tf.random.truncated_normal(shape=tensor_shape, stddev=tf.sqrt(2.0 / in_num)), name=name)


def bias(tensor_shape, name):
    return tf.Variable(tf.zeros(shape=tensor_shape), name=name)


def act_func(input_tensor, keep_prob=1.0, dropout_opt=False, bn_opt=False, func=tf.nn.tanh, is_train=True):
    if not dropout_opt and not bn_opt:
        return func(input_tensor)
    elif dropout_opt and not bn_opt:
        # TF2's tf.nn.dropout takes a drop rate instead of a keep probability
        return tf.nn.dropout(func(input_tensor), rate=1.0 - keep_prob)
    elif not dropout_opt and bn_opt:
        # tf.layers.batch_normalization lives under compat.v1 on TensorFlow 2.x
        bn_output = tf.compat.v1.layers.batch_normalization(input_tensor, training=is_train)
        return func(bn_output)
    else:
        raise ValueError("Don't enable dropout and batch normalization at the same time!")


def input_embedding_layers(nn_input, scope, keep_prob=1.0, is_train=True):
    with tf.name_scope(scope):
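        # nn_input: [batch, movies_num] explicit or implicit rating vector,
        # mapped through three dense layers to D*D and reshaped to [batch, D, D]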
        input_l1w = weight([movies_num, 512], movies_num, 'layer1_weight')
        input_l1b = bias([512], 'layer1_bias')
        l1 = tf.matmul(nn_input, input_l1w) + input_l1b
        l1_output = act_func(l1, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        input_l2w = weight([512, 256], 512, 'layer2_weight')
        input_l2b = bias([256], 'layer2_bias')
        l2 = tf.matmul(l1_output, input_l2w) + input_l2b
        l2_output = act_func(l2, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        input_l3w = weight([256, D2], 256, name='layer3_weight')
        input_l3b = bias([D2], 'layer3_bias')
        l3 = tf.matmul(l2_output, input_l3w) + input_l3b
        input_embedding = act_func(l3, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        stack_input = tf.reshape(tf.expand_dims(input_embedding, axis=1), [-1, D, D])
        return stack_input


def user_feat_embedding(scope, gender, age, occupy, prefer, bench, keep_prob=1.0, is_train=True):
    with tf.name_scope(scope):
        # embedding block - user gender + age
        gender_im_w = weight([1, gender_type], gender_type, 'gender_identity_mapping_w')
        gender_im_b = bias([gender_type], 'gender_identity_mapping_b')
        gender_im = tf.multiply(gender, gender_im_w) + gender_im_b

        age_im_w = weight([1, age_type], age_type, 'age_identity_mapping_w')
        age_im_b = bias([age_type], 'age_identity_mapping_b')
        age_im = tf.multiply(age, age_im_w) + age_im_b
        gender_age = tf.concat([gender_im, age_im], axis=1)

        ub1_l1w = weight([gender_type + age_type, D], gender_type + age_type, 'ub1_l1w')
        ub1_l1b = bias([D], 'ub1_l1b')
        ub1_l1 = tf.matmul(gender_age, ub1_l1w) + ub1_l1b
        ub1_l1_output = act_func(ub1_l1, bn_opt=is_bn, is_train=is_train)
        ub1_l2w = weight([D, D], D, 'ub1_l2w')
        ub1_l2b = bias([D], 'ub1_l2b')
        ub1_l2 = tf.matmul(ub1_l1_output, ub1_l2w) + ub1_l2b
        ub1_l2_output = act_func(ub1_l2, bn_opt=is_bn, is_train=is_train)
        # embedding block - user occupation
        occupy_im_w = weight([1, occupy_type], occupy_type, 'occupy_identity_mapping_w')
        occupy_im_b = bias([occupy_type], 'occupy_identity_mapping_b')
        occupy_im = tf.multiply(occupy, occupy_im_w) + occupy_im_b

        ub2_l1w = weight([occupy_type, D], occupy_type, 'ub2_l1w')
        ub2_l1b = bias([D], 'ub2_l1b')
        ub2_l1 = tf.matmul(occupy_im, ub2_l1w) + ub2_l1b
        ub2_l1_output = act_func(ub2_l1, bn_opt=is_bn, is_train=is_train)
        ub2_l2w = weight([D, D], D, 'ub2_l2w')
        ub2_l2b = bias([D], 'ub2_l2b')
        ub2_l2 = tf.matmul(ub2_l1_output, ub2_l2w) + ub2_l2b
        ub2_l2_output = act_func(ub2_l2, bn_opt=is_bn, is_train=is_train)
        # embedding block - user preference
        ub3_l1w = weight([prefer_type, D], prefer_type, 'ub3_l1w')
        ub3_l1b = bias([D], 'ub3_l1b')
        ub3_l1 = tf.matmul(prefer, ub3_l1w) + ub3_l1b
        ub3_l1_output = act_func(ub3_l1, bn_opt=is_bn, is_train=is_train)
        ub3_l2w = weight([D, D], D, 'ub3_l2w')
        ub3_l2b = bias([D], 'ub3_l2b')
        ub3_l2 = tf.matmul(ub3_l1_output, ub3_l2w) + ub3_l2b
        ub3_l2_output = act_func(ub3_l2, bn_opt=is_bn, is_train=is_train)
        ub3_l3w = weight([D, D], D, 'ub3_l3w')
        ub3_l3b = bias([D], 'ub3_l3b')
        ub3_l3 = tf.matmul(ub3_l2_output, ub3_l3w) + ub3_l3b
        ub3_l3_output = act_func(ub3_l3, bn_opt=is_bn, is_train=is_train)
        # embedding block 4 - user ratings mean & std
        ub4_l1w = weight([bench_type, int(D / 2)], bench_type, 'ub4_l1w')
        ub4_l1b = bias([int(D / 2)], 'ub4_l1b')
        ub4_l1 = tf.matmul(bench, ub4_l1w) + ub4_l1b
        ub4_l1_output = act_func(ub4_l1, bn_opt=is_bn, is_train=is_train)
        ub4_l2w = weight([int(D / 2), D], int(D / 2), 'ub4_l2w')
        ub4_l2b = bias([D], 'ub4_l2b')
        ub4_l2 = tf.matmul(ub4_l1_output, ub4_l2w) + ub4_l2b
        ub4_l2_output = act_func(ub4_l2, bn_opt=is_bn, is_train=is_train)
        # user embeddings concatenation
        block_ue1_exp = tf.expand_dims(ub1_l2_output, axis=1)
        block_ue2_exp = tf.expand_dims(ub2_l2_output, axis=1)
        block_ue3_exp = tf.expand_dims(ub3_l3_output, axis=1)
        block_ue4_exp = tf.expand_dims(ub4_l2_output, axis=1)
        user_embeddings = tf.concat([block_ue1_exp, block_ue2_exp, block_ue3_exp, block_ue4_exp], axis=1)
        return user_embeddings


def item_feat_embedding(scope, genres, bench, keep_prob=1.0, is_train=True):
    with tf.name_scope(scope):
        # embedding block - item genres
        ib1_l1w = weight([genres_type, D], genres_type, 'ib1_l1w')
        ib1_l1b = bias([D], 'ib1_l1b')
        ib1_l1 = tf.matmul(genres, ib1_l1w) + ib1_l1b
        ib1_l1_output = act_func(ib1_l1, bn_opt=is_bn, is_train=is_train)
        ib1_l2w = weight([D, D], D, 'ib1_l2w')
        ib1_l2b = bias([D], 'ib1_l2b')
        ib1_l2 = tf.matmul(ib1_l1_output, ib1_l2w) + ib1_l2b
        ib1_l2_output = act_func(ib1_l2, bn_opt=is_bn, is_train=is_train)
        ib1_l3w = weight([D, D], D, 'ib1_l3w')
        ib1_l3b = bias([D], 'ib1_l3b')
        ib1_l3 = tf.matmul(ib1_l2_output, ib1_l3w) + ib1_l3b
        ib1_l3_output = act_func(ib1_l3, bn_opt=is_bn, is_train=is_train)
        # embedding block - item mean & std
        ib2_l1w = weight([bench_type, int(D / 2)], bench_type, 'ib2_l1w')
        ib2_l1b = bias([int(D / 2)], 'ib2_l1b')
        ib2_l1 = tf.matmul(bench, ib2_l1w) + ib2_l1b
        ib2_l1_output = act_func(ib2_l1, bn_opt=is_bn, is_train=is_train)
        ib2_l2w = weight([int(D / 2), D], int(D / 2), 'ib2_l2w')
        ib2_l2b = bias([D], 'ib2_l2b')
        ib2_l2 = tf.matmul(ib2_l1_output, ib2_l2w) + ib2_l2b
        ib2_l2_output = act_func(ib2_l2, bn_opt=is_bn, is_train=is_train)
        # item embeddings concatenation
        block_ie1_exp = tf.expand_dims(ib1_l3_output, axis=1)
        block_ie2_exp = tf.expand_dims(ib2_l2_output, axis=1)
        item_embeddings = tf.concat([block_ie1_exp, block_ie2_exp], axis=1)
        return item_embeddings


def attention_block(input_embedding, feat):
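    # input_embedding: [batch, D, D]; feat: [batch, K, D], a stack of K
    # attribute embedding blocks (K = 4 for the user blocks); output: [batch, D, D]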
    feat_trans = tf.transpose(feat, [0, 2, 1])
    attention = tf.nn.softmax(tf.matmul(input_embedding, feat_trans))
    attentive_input = tf.matmul(attention, feat)
    return attentive_input


def attention_combine(layer_1, layer_2, layer_3, layer_4, channel=2):
    if channel == 2:
        layer_1 = tf.expand_dims(layer_1, axis=1)
        layer_2 = tf.expand_dims(layer_2, axis=1)
        combine = tf.concat([layer_1, layer_2], axis=1)
        return combine
    elif channel == 4:
        layer_1 = tf.expand_dims(layer_1, axis=1)
        layer_2 = tf.expand_dims(layer_2, axis=1)
        layer_3 = tf.expand_dims(layer_3, axis=1)
        layer_4 = tf.expand_dims(layer_4, axis=1)
        combine = tf.concat([layer_1, layer_2, layer_3, layer_4], axis=1)
        return combine
    else:
        raise ValueError("Param channel must be 2 or 4")


def interaction_cube(scope, att_input, channel, keep_prob=1.0, is_train=True):
    with tf.name_scope(scope):
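        # att_input: [batch, channel, D, D]; per-channel element-wise weighting,
        # then (for channel >= 2) a learned squeeze back to [batch, D, D]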
        if isinstance(channel, int) and channel >= 1:
            conv_kernel = weight([1, channel, D, D], channel * D2, 'kernel')
            conv_bias = bias([channel, D, D], 'conv_bias')
            conv_res = tf.multiply(att_input, conv_kernel) + conv_bias
            if channel >= 2:
                conv_output = tf.transpose(act_func(conv_res, bn_opt=is_bn, is_train=is_train), [0, 2, 3, 1])
                squeeze_kernel = tf.reshape(weight([channel, D, D], channel * D2, 'squeeze'), [channel * D, D])
                squeeze_bias = bias([D, D], 'sq_bias')
                sq_re = tf.reshape(tf.matmul(tf.reshape(conv_output, [-1, channel * D]), squeeze_kernel),
                                   [-1, D, D]) + squeeze_bias
                return sq_re
            else:
                return tf.reshape(conv_res, [-1, D, D])
        else:
            raise ValueError("Param channel must be a positive int")


def ui_interaction(i_latent_arr, u_latent_arr, users_count, pos_idx):
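    # u_latent_arr: [users_count, D, D]; i_latent_arr: [sum(pos_idx), D, D].
    # pos_idx holds the number of items per user, so each user's latent
    # matrix is multiplied with exactly that user's block of item matrices.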
    i_sp = tf.split(i_latent_arr, pos_idx)
    u_sp = tf.split(u_latent_arr, users_count)
    ui = []
    # for t in range(users_count):
    #     tempt = tf.map_fn(fn=lambda item: tf.matmul(item, tf.reshape(u_sp[t], [D, D]), transpose_b=True),
    #                       elems=i_sp[t],
    #                       dtype=tf.float32)
    #     ui.append(tempt)
    for t in range(users_count):
        tempt = tf.matmul(tf.reshape(i_sp[t], [-1, D]), tf.reshape(u_sp[t], [D, D]), transpose_b=True)
        ui.append(tf.reshape(tempt, [-1, D, D]))
    return tf.concat(ui, axis=0)


def layer_output(input_tensor, w, b, is_train):
    re = tf.matmul(input_tensor, w) + b
    return act_func(re, is_train=is_train)


def item_att(i_att_input, keep_prob=1.0, is_train=True):
    att_input = tf.reshape(i_att_input, [-1, 2 * D])
    with tf.name_scope('item_branch'):
        l1_w = weight([2 * D, 4 * D], 2 * D, 'l1_w')
        l1_b = bias([4 * D], 'l1_b')
        l1_output = layer_output(att_input, l1_w, l1_b, is_train)
        l2_w = weight([4 * D, 8 * D], 4 * D, 'l2_w')
        l2_b = bias([8 * D], 'l2_b')
        l2_output = layer_output(l1_output, l2_w, l2_b, is_train)
        l3_w = weight([8 * D, int(D2 / 2)], 8 * D, 'l3_w')
        l3_b = bias([int(D2 / 2)], 'l3_b')
        l3_output = layer_output(l2_output, l3_w, l3_b, is_train)
        l4_w = weight([int(D2 / 2), D2], int(D2 / 2), 'l4_w')
        l4_b = bias([D2], 'l4_b')
        l4_output = layer_output(l3_output, l4_w, l4_b, is_train)
        return tf.reshape(l4_output, [-1, D, D])


def MLP(ui_latent, keep_prob=1.0, is_train=True):
    with tf.name_scope('mlp'):
        l1_w = weight([D2, int(D2 / 2)], D2, 'l1_w')
        l1_b = bias([int(D2 / 2)], 'l1_b')
        l1 = tf.matmul(ui_latent, l1_w) + l1_b
        l1_output = act_func(l1, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        l2_w = weight([int(D2 / 2), int(D2 / 4)], int(D2 / 2), 'l2_w')
        l2_b = bias([int(D2 / 4)], 'l2_b')
        l2 = tf.matmul(l1_output, l2_w) + l2_b
        l2_output = act_func(l2, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        # l3_w = weight([int(D2 / 8), int(D2 / 8)], int(D2 / 8), 'l3_w')
        # l3_b = bias([int(D2 / 8)], 'l3_b')
        # l3 = tf.matmul(l2_output, l3_w) + l3_b
        # l3_output = act_func(l3, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        l4_w = weight([int(D2 / 4), 5], int(D2 / 4), 'l4_w')
        l4_b = bias([5], 'l4_b')
        l4 = tf.matmul(l2_output, l4_w) + l4_b
        l4_output = act_func(l4, keep_prob=keep_prob, dropout_opt=is_dropout, bn_opt=is_bn, is_train=is_train)
        return l4_output


class AHCCN:
    def __init__(self, batch_size, is_eval):
        # Parameters for AHCCN
        self.global_steps = tf.compat.v1.placeholder(tf.int64, name='global_steps')
        # self.batch_size = tf.placeholder(tf.int64, name='batch_size')
        self.batch_size = batch_size
        self.is_eval = is_eval
        self.keep_prob = tf.compat.v1.placeholder(tf.float32, name='dropout_keep_prob')
        self.learning_rate = tf.compat.v1.placeholder(tf.float32, name='learning_rate')
        self.split_pos = tf.compat.v1.placeholder(tf.int64, [batch_size], name='split_index')
        self.is_train = tf.compat.v1.placeholder(tf.bool, name='train_signal')
        # Variables for users
        self.gender = tf.compat.v1.placeholder(tf.float32, [None, gender_type], name='user_gender')  # one-hot
        self.age = tf.compat.v1.placeholder(tf.float32, [None, age_type], name='user_age')  # one-hot
        self.occupy = tf.compat.v1.placeholder(tf.float32, [None, occupy_type], name='user_occupation')  # one-hot
        self.prefer = tf.compat.v1.placeholder(tf.float32, [None, prefer_type], name='user_preference')  # multi-hot
        self.bench_u = tf.compat.v1.placeholder(tf.float32, [None, bench_type], name='user_mean_std')  # dense
        # self.implicit_dense_tensor = tf.placeholder(tf.float32, [users_num, movies_num],
        #                                             name='u-i_implicit_tensor')
        self.ex_vals = tf.compat.v1.placeholder(tf.float32, name='u-i_explicit_sparse_tensor_values')
        self.ex_idx = tf.compat.v1.placeholder(tf.int64, [None, 2], name='u-i_explicit_sparse_tensor_index')
        self.explicit_sparse_tensor = tf.SparseTensor(indices=self.ex_idx,
                                                      values=self.ex_vals,
                                                      dense_shape=[self.batch_size, movies_num])
        self.explicit_input_u = tf.sparse.to_dense(self.explicit_sparse_tensor, name='u-i_explicit_tensor')
        # self.explicit_input_u = tf.placeholder(tf.float32, [None, movies_num], name='u-i_explicit_tensor')
        # self.user_idx = tf.placeholder(tf.int64)

        # self.explicit_input_u = tf.reshape(tf.sparse_to_dense(sparse_indices=self.ex_idx,
        #                                                       output_shape=[movies_num],
        #                                                       sparse_values=self.ex_vals,
        #                                                       ),
        #                                    shape=[1, -1], name='u-i_implicit_tensor')
        self.implicit_input_u = tf.compat.v1.placeholder(tf.float32, [None, movies_num], name='u-i_implicit_tensor')

        # Variables for items
        self.genres = tf.compat.v1.placeholder(tf.float32, [None, genres_type], name='item_genres')  # multi-hot
        self.bench_i = tf.compat.v1.placeholder(tf.float32, [None, bench_type], name='item_mean_std')  # dense

        # self.item_idx = tf.placeholder(tf.int64)
        # self.explicit_input_i = tf.transpose(tf.gather(self.explicit_dense_tensor, self.item_idx, axis=1))
        # self.implicit_input_i = tf.transpose(tf.gather(self.implicit_dense_tensor, self.item_idx, axis=1))

        self.targets = tf.compat.v1.placeholder(tf.float32, [None, 5], name='ratings_softmax_targets')
        # Embedding Modules
        self.pred = self.build_graph()  # build the graph once; calling it again would create a duplicate set of variables
        self.logits = tf.nn.softmax(self.pred)

        # self.reg2_loss = reg2()
        # self.total_loss = self.reg2_loss + self.logloss

        self.train_op()
        # self.learning_rate_decay = tf.train.exponential_decay(self.learning_rate, self.global_steps,
        #                                                       learning_decay_steps, learning_decay_rate, staircase=True)
        # self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.total_loss)
        self.init_op = tf.compat.v1.global_variables_initializer()
        self.saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.global_variables(), max_to_keep=10)

    def build_graph(self):
        u_feat_e1 = user_feat_embedding('u_feat_e1', self.gender, self.age, self.occupy, self.prefer, self.bench_u,
                                        is_train=self.is_train)
        i_feat_e1 = item_feat_embedding('i_feat_e1', self.genres, self.bench_i, is_train=self.is_train)

        u_ex_input_embedding = input_embedding_layers(self.explicit_input_u, 'u_ex', keep_prob=self.keep_prob,
                                                      is_train=self.is_train)
        u_im_input_embedding = input_embedding_layers(self.implicit_input_u, 'u_im', keep_prob=self.keep_prob,
                                                      is_train=self.is_train)
        # Depth 1
        u_ex_att_input = attention_block(u_ex_input_embedding, u_feat_e1)
        u_im_att_input = attention_block(u_im_input_embedding, u_feat_e1)
        u_conv_input1 = attention_combine(u_ex_att_input, u_ex_input_embedding,
                                          u_im_att_input, u_im_input_embedding,
                                          channel=4)
        u_cube_1 = interaction_cube('u_cube_1', u_conv_input1, channel=4, is_train=self.is_train)
        # Depth 2
        u_feat_e2 = user_feat_embedding('u_feat_e2', self.gender, self.age, self.occupy, self.prefer, self.bench_u,
                                        is_train=self.is_train)
        u_cube_1_att = attention_block(u_cube_1, u_feat_e2)
        u_conv_input2 = attention_combine(u_cube_1_att, u_cube_1, [], [], channel=2)
        u_cube_2 = interaction_cube('u_cube_2', u_conv_input2, channel=2, is_train=self.is_train)
        # Depth 3
        # u_feat_e3 = user_feat_embedding('u_feat_e3', self.gender, self.age, self.occupy, self.prefer, self.bench_u,
        #                                 is_train=self.is_train)
        # u_cube_2_att = attention_block(u_cube_2, u_feat_e3)
        # u_conv_input3 = attention_combine(u_cube_2_att, u_cube_2, [], [], channel=2)
        u_cube_3 = interaction_cube('u_cube_3', u_cube_2, channel=1, is_train=self.is_train)

        u_latent_arr = u_cube_3
        i_latent_arr = item_att(i_feat_e1, is_train=self.is_train)
        ui_latent_interaction = ui_interaction(i_latent_arr, u_latent_arr, self.batch_size, self.split_pos)
        pred = MLP(tf.reshape(ui_latent_interaction, [-1, D * D]), keep_prob=self.keep_prob, is_train=self.is_train)
        return pred


    def train_op(self):
        if not self.is_eval:
            self.logloss = tf.reduce_mean(tf.reduce_sum(- self.targets * tf.math.log(tf.clip_by_value(self.logits, 1e-10, 1.0)),
                                                        axis=1))
            self.correct_prediction = tf.equal(tf.argmax(self.targets, 1), tf.argmax(self.logits, 1))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
            self.total_loss = self.logloss
            self.learning_rate_decay = tf.compat.v1.train.exponential_decay(self.learning_rate, self.global_steps,
                                                                            learning_decay_steps, learning_decay_rate,
                                                                            staircase=True)
            # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
            # run the batch-norm moving-average updates together with the train step
            update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                self.optimizer = tf.compat.v1.train.AdagradOptimizer(self.learning_rate_decay).minimize(self.total_loss)
        else:
            pass








2 answers

  • CSDN专家-黄老师 2021-05-18 19:45

    First sort out the relationships between the functions and the class: who calls whom, and what instantiates what. Only then will you know what the code actually implements.
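
    Concretely, the call chain is roughly: AHCCN.__init__ → build_graph → (input_embedding_layers, user_feat_embedding, item_feat_embedding) → attention_block / attention_combine / interaction_cube → item_att → ui_interaction → MLP, after which train_op defines the loss and optimizer. What the post is missing is a driver script. Below is a minimal smoke-test sketch, assuming it is appended to the same file so that AHCCN and the hyper-parameters (movies_num, gender_type, and so on) are in scope; it feeds random synthetic data shaped to match the placeholders in AHCCN.__init__, only to verify that the graph builds and one optimizer step runs. The batch size, items-per-user count, and learning rate are arbitrary illustration values.

    import numpy as np

    BATCH = 4             # users per batch (AHCCN fixes batch_size up front)
    PER_USER = 3          # synthetic assumption: every user in the batch rates 3 items
    N = BATCH * PER_USER  # total user-item pairs, i.e. rows on the item side

    model = AHCCN(batch_size=BATCH, is_eval=False)

    def rand_one_hot(n, k):
        m = np.zeros((n, k), dtype=np.float32)
        m[np.arange(n), np.random.randint(k, size=n)] = 1.0
        return m

    # sorted, duplicate-free movie ids per user: tf.sparse.to_dense expects
    # row-major ordered indices without repeats
    ex_idx = np.array([[u, m] for u in range(BATCH)
                       for m in sorted(np.random.choice(movies_num, PER_USER, replace=False))],
                      dtype=np.int64)

    feed = {
        model.gender: rand_one_hot(BATCH, gender_type),
        model.age: rand_one_hot(BATCH, age_type),
        model.occupy: rand_one_hot(BATCH, occupy_type),
        model.prefer: np.random.randint(0, 2, (BATCH, prefer_type)).astype(np.float32),
        model.bench_u: np.random.rand(BATCH, bench_type).astype(np.float32),
        model.genres: np.random.randint(0, 2, (N, genres_type)).astype(np.float32),
        model.bench_i: np.random.rand(N, bench_type).astype(np.float32),
        model.ex_idx: ex_idx,
        model.ex_vals: np.random.randint(1, 6, N).astype(np.float32),
        model.implicit_input_u: np.random.randint(0, 2, (BATCH, movies_num)).astype(np.float32),
        model.split_pos: np.full(BATCH, PER_USER, dtype=np.int64),
        model.targets: rand_one_hot(N, 5),  # ratings 1..5 as one-hot softmax targets
        model.keep_prob: 1.0,
        model.learning_rate: 0.01,
        model.global_steps: 0,
        model.is_train: True,
    }

    with tf.compat.v1.Session() as sess:
        sess.run(model.init_op)
        _, loss, acc = sess.run([model.optimizer, model.logloss, model.accuracy], feed_dict=feed)
        print("loss %.4f  acc %.4f" % (loss, acc))

    For real training, replace the random arrays with batches built from the ml-1m files under dataset_path, feed an increasing global_steps, and loop the sess.run call over epochs.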

