

# -*- coding:utf-8 -*-
"""
Author:
    Weichen Shen

Reference:
    Zhou G, Mou N, Fan Y, et al. Deep Interest Evolution Network for Click-Through Rate Prediction[C]//AAAI. 2019.
"""

import tensorflow as tf
from tensorflow.python.keras.layers import Concatenate, Dense, Permute, multiply

from ..feature_column import SparseFeat, VarLenSparseFeat, DenseFeat, build_input_features
from ..inputs import get_varlen_pooling_list, create_embedding_matrix, embedding_lookup, varlen_embedding_lookup, \
    get_dense_input
from ..layers.core import DNN, PredictionLayer
from ..layers.sequence import AttentionSequencePoolingLayer, DynamicGRU
from ..layers.utils import concat_func, reduce_mean, combined_dnn_input


def auxiliary_loss(h_states, click_seq, noclick_seq, mask, stag=None):
    #:param h_states: hidden states of the interest-extractor GRU, shape (B, T-1, H)
    #:param click_seq: embeddings of the actually clicked next behaviors, shape (B, T-1, E)
    #:param noclick_seq: embeddings of negatively sampled behaviors, shape (B, T-1, E)
    #:param mask: true (unpadded) behavior sequence lengths
    #:param stag: name scope tag
    #:return: scalar auxiliary loss
    hist_len, _ = click_seq.get_shape().as_list()[1:]
    mask = tf.sequence_mask(mask, hist_len)
    mask = mask[:, 0, :]
    mask = tf.cast(mask, tf.float32)

    click_input_ = tf.concat([h_states, click_seq], -1)
    noclick_input_ = tf.concat([h_states, noclick_seq], -1)

    auxiliary_nn = DNN([100, 50, 1], activation='sigmoid')

    click_prop_ = auxiliary_nn(click_input_, stag=stag)[:, :, 0]
    noclick_prop_ = auxiliary_nn(noclick_input_, stag=stag)[:, :, 0]

    # tf.log was removed in TF 2.x; fall back to tf.math.log when it is missing.
    try:
        click_loss_ = - tf.reshape(tf.log(click_prop_),
                                   [-1, tf.shape(click_seq)[1]]) * mask
    except AttributeError:
        click_loss_ = - tf.reshape(tf.math.log(click_prop_),
                                   [-1, tf.shape(click_seq)[1]]) * mask
    try:
        noclick_loss_ = - \
            tf.reshape(tf.log(1.0 - noclick_prop_),
                       [-1, tf.shape(noclick_seq)[1]]) * mask
    except AttributeError:
        noclick_loss_ = - \
            tf.reshape(tf.math.log(1.0 - noclick_prop_),
                       [-1, tf.shape(noclick_seq)[1]]) * mask

    loss_ = reduce_mean(click_loss_ + noclick_loss_)
    return loss_
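
# A minimal NumPy sanity-check of auxiliary_loss above (illustrative only, not part
# of the library): per step, the clicked next behavior is a positive and a sampled
# non-click is a negative, so the loss is masked binary cross-entropy averaged over
# the batch. The helper name and the pre-computed probabilities are assumptions.
def _auxiliary_loss_reference(click_prop, noclick_prop, mask):
    """Hypothetical reference: masked BCE over per-step click probabilities."""
    import numpy as np
    click_loss = -np.log(click_prop) * mask            # -log p for positives
    noclick_loss = -np.log(1.0 - noclick_prop) * mask  # -log(1-p) for negatives
    return np.mean(click_loss + noclick_loss)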

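# Conceptual sketch of a single AUGRU step ("GRU with attentional update gate"),
# the cell that DynamicGRU(gru_type="AUGRU") runs inside interest_evolution below.
# The attention score a_t rescales the update gate, so behaviors irrelevant to the
# target item barely move the hidden state. All names here are illustrative; this
# is not the DeepCTR implementation (biases are omitted for brevity).
def _augru_step_reference(x_t, h_prev, att_score, params):
    """Hypothetical NumPy reference for one AUGRU cell step."""
    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    W_u, U_u, W_r, U_r, W_h, U_h = params
    u_t = sigmoid(x_t @ W_u + h_prev @ U_u)              # update gate
    r_t = sigmoid(x_t @ W_r + h_prev @ U_r)              # reset gate
    h_tilde = np.tanh(x_t @ W_h + (r_t * h_prev) @ U_h)  # candidate state
    u_t = att_score * u_t                                # attention-scaled update gate
    return (1.0 - u_t) * h_prev + u_t * h_tilde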
def interest_evolution(concat_behavior, deep_input_item, user_behavior_length, gru_type="GRU", use_neg=False,
                       neg_concat_behavior=None, att_hidden_size=(64, 16), att_activation='sigmoid',
                       att_weight_normalization=False):
    if gru_type not in ["GRU", "AIGRU", "AGRU", "AUGRU"]:
        raise ValueError("gru_type error ")
    aux_loss_1 = None
    embedding_size = None
    # Interest extractor layer: a plain GRU over the embedded behavior sequence.
    rnn_outputs = DynamicGRU(embedding_size, return_sequence=True,
                             name="gru1")([concat_behavior, user_behavior_length])

    if gru_type == "AUGRU" and use_neg:
        # Supervise each hidden state with the next (clicked) behavior against a
        # negatively sampled one; sequences are shifted by one step accordingly.
        aux_loss_1 = auxiliary_loss(rnn_outputs[:, :-1, :], concat_behavior[:, 1:, :],
                                    neg_concat_behavior[:, 1:, :],
                                    tf.subtract(user_behavior_length, 1), stag="gru")

    if gru_type == "GRU":
        rnn_outputs2 = DynamicGRU(embedding_size, return_sequence=True,
                                  name="gru2")([rnn_outputs, user_behavior_length])
        hist = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, att_activation=att_activation,
                                             weight_normalization=att_weight_normalization, return_score=False)(
            [deep_input_item, rnn_outputs2, user_behavior_length])
    else:  # AIGRU AGRU AUGRU
        scores = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, att_activation=att_activation,
                                               weight_normalization=att_weight_normalization, return_score=True)(
            [deep_input_item, rnn_outputs, user_behavior_length])
        if gru_type == "AIGRU":
            # AIGRU: scale the GRU inputs by the attention scores, then run a plain GRU.
            hist = multiply([rnn_outputs, Permute([2, 1])(scores)])
            final_state2 = DynamicGRU(embedding_size, gru_type="GRU", return_sequence=False,
                                      name='gru2')([hist, user_behavior_length])
        else:  # AGRU AUGRU: feed the attention scores into the recurrent cell itself.
            final_state2 = DynamicGRU(embedding_size, gru_type=gru_type, return_sequence=False,
                                      name='gru2')([rnn_outputs, user_behavior_length, Permute([2, 1])(scores)])
        hist = final_state2
    return hist, aux_loss_1


def DIEN(dnn_feature_columns, history_feature_list, gru_type="GRU", use_negsampling=False, alpha=1.0,
         use_bn=False, dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_units=(64, 16),
         att_activation="dice", att_weight_normalization=True, l2_reg_dnn=0, l2_reg_embedding=1e-6,
         dnn_dropout=0, seed=1024, task='binary'):
    """Instantiates the Deep Interest Evolution Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param history_feature_list: list, to indicate sequence sparse field.
    :param gru_type: str, can be GRU AIGRU AUGRU AGRU.
    :param use_negsampling: bool, whether or not to use negative sampling.
    :param alpha: float, weight of the auxiliary loss.
    :param use_bn: bool, whether to use BatchNormalization before activation in the deep net.
    :param dnn_hidden_units: list of positive integers, the layer number and units in each layer of the DNN.
    :param dnn_activation: activation function to use in the DNN.
    :param att_hidden_units: list of positive integers, the layer number and units in each layer of the attention net.
    :param att_activation: activation function to use in the attention net.
    :param att_weight_normalization: bool, whether to normalize the attention score of the local activation unit.
    :param l2_reg_dnn: float, L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float, L2 regularizer strength applied to embedding vectors.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A Keras model instance.
    """

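
# A minimal usage sketch, assuming the full `deepctr` package is installed. The
# feature names, vocabulary sizes, and toy data below are made up for illustration;
# `seq_length` carries the true length of each padded behavior sequence.
if __name__ == "__main__":
    import numpy as np
    from deepctr.feature_column import SparseFeat, VarLenSparseFeat
    from deepctr.models import DIEN

    feature_columns = [
        SparseFeat('user', vocabulary_size=3, embedding_dim=8),
        SparseFeat('item_id', vocabulary_size=4, embedding_dim=8),
        VarLenSparseFeat(SparseFeat('hist_item_id', vocabulary_size=4, embedding_dim=8),
                         maxlen=4, length_name='seq_length'),
    ]
    model = DIEN(feature_columns, history_feature_list=['item_id'], gru_type="AUGRU")
    model.compile('adam', 'binary_crossentropy')

    x = {'user': np.array([0, 1, 2]),
         'item_id': np.array([1, 2, 3]),
         'hist_item_id': np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]]),
         'seq_length': np.array([3, 3, 2])}
    y = np.array([1, 0, 1])
    model.fit(x, y, epochs=1, verbose=0)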