Commit

resolve new requirements
Zhang-Zhenning committed Jul 27, 2022
1 parent 66465ee commit b52d097
Showing 10 changed files with 575 additions and 809 deletions.
@@ -19,7 +19,7 @@
batch_size=1
maxlen=100

data_location='data'
data_location='../data'
test_file = os.path.join(data_location, "local_test_splitByUser")
uid_voc = os.path.join(data_location, "uid_voc.pkl")
mid_voc = os.path.join(data_location, "mid_voc.pkl")
@@ -93,7 +93,6 @@ def prepare_data(input, target, maxlen=None, return_neg=False):
target), numpy.array(lengths_x)



test_data = DataIterator(test_file,
uid_voc,
mid_voc,
@@ -107,12 +106,9 @@ def prepare_data(input, target, maxlen=None, return_neg=False):

for src, tgt in test_data:
uids, mids, cats, mid_his, cat_his, mid_mask, target, sl = prepare_data(src, tgt)

all_data = [uids, mids, cats, mid_his, cat_his, mid_mask, target, sl]

for cur_data in all_data:
cur_data = numpy.squeeze(cur_data).reshape(-1)

for col in range(cur_data.shape[0]):
uid = cur_data[col]
# print(uid)
@@ -125,13 +121,5 @@ def prepare_data(input, target, maxlen=None, return_neg=False):
if counter >= 1:
break
counter += 1


f.close()







f.close()
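
The dump loop above flattens each of the eight tensors returned by prepare_data and writes the values out in a fixed order: uids, mids, cats, mid_his, cat_his, mid_mask, target, sl. The C++ client later in this commit reads them back in exactly that order (its switch cases 0 through 7). A minimal sketch of the flattening step, illustrative only and not code from the repository:

import numpy

# field order shared with the C++ reader (cases 0-7)
FIELD_ORDER = ["uids", "mids", "cats", "mid_his",
               "cat_his", "mid_mask", "target", "sl"]

def flatten(tensor):
    # same transformation as the loop above: drop size-1 axes, then view as 1-D
    return numpy.squeeze(tensor).reshape(-1)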
@@ -39,7 +39,6 @@ def main(n_uid,n_mid,n_cat):

with tf.Session() as sess1:


model = Model_DIN_V2_Gru_Vec_attGru_Neg(n_uid, n_mid, n_cat,
EMBEDDING_DIM, HIDDEN_SIZE,
ATTENTION_SIZE)
@@ -78,7 +77,7 @@ def main(n_uid,n_mid,n_cat):
parser.add_argument('--checkpoint',
help='ckpt path',
required=False,
default='./data')
default='../data')
parser.add_argument('--bf16',
help='enable DeepRec BF16 in deep model. Default FP32',
action='store_true')
@@ -96,7 +95,6 @@ def main(n_uid,n_mid,n_cat):
mid_d = load_dict(mid_voc)
cat_d = load_dict(cat_voc)


main(len(uid_d),len(mid_d),len(cat_d))
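
load_dict is defined elsewhere in the repository and is not part of this diff. Since the vocabularies are *.pkl files and only their lengths are passed to main as n_uid/n_mid/n_cat, a plausible minimal implementation (an assumption, not the repository's code) simply unpickles a token-to-id mapping:

import pickle

def load_dict(path):
    # assumed behaviour: each *.pkl vocab maps token -> integer id,
    # so len(load_dict(...)) gives the vocabulary size for the embedding tables
    with open(path, "rb") as fin:
        return pickle.load(fin)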


@@ -23,94 +23,59 @@ static const char* model_config = "{ \
} ";


::tensorflow::eas::ArrayProto get_proto_float_1(std::vector<float>& cur_vector){
::tensorflow::eas::ArrayShape array_shape;
::tensorflow::eas::ArrayDataType dtype_f =
::tensorflow::eas::ArrayDataType::DT_FLOAT;

array_shape.add_dim(1);
::tensorflow::eas::ArrayProto input;
input.add_float_val((float)cur_vector.back());
input.set_dtype(dtype_f);
*(input.mutable_array_shape()) = array_shape;
return input;

}

::tensorflow::eas::ArrayProto get_proto_float_2(std::vector<float>& cur_vector){
::tensorflow::eas::ArrayProto get_proto_cc(std::vector<char*>& cur_vector, ::tensorflow::eas::ArrayDataType dtype_f){
::tensorflow::eas::ArrayShape array_shape;
::tensorflow::eas::ArrayDataType dtype_f =
::tensorflow::eas::ArrayDataType::DT_FLOAT;
int num_elem = (int)cur_vector.size();

array_shape.add_dim(1);
if((int)cur_vector.size() < 0){

array_shape.add_dim(1);
::tensorflow::eas::ArrayProto input;
input.add_float_val(1.0);
input.set_dtype(dtype_f);
*(input.mutable_array_shape()) = array_shape;

return input;
}
array_shape.add_dim((int)cur_vector.size());

::tensorflow::eas::ArrayProto input;
for(int tt = 0; tt < (int)cur_vector.size(); ++tt)
{
input.add_float_val((float)cur_vector[tt]);
}

int num_elem = (int)cur_vector.size();
input.set_dtype(dtype_f);
*(input.mutable_array_shape()) = array_shape;

return input;

}

::tensorflow::eas::ArrayProto get_proto_int_1(std::vector<int>& cur_vector){
::tensorflow::eas::ArrayShape array_shape;
::tensorflow::eas::ArrayDataType dtype_i =
::tensorflow::eas::ArrayDataType::DT_INT32;

array_shape.add_dim(1);
::tensorflow::eas::ArrayProto input;
input.add_int_val((int)cur_vector.back());
input.set_dtype(dtype_i);
*(input.mutable_array_shape()) = array_shape;
return input;

switch(dtype_f){
case 1:
array_shape.add_dim(1);
if (num_elem == 1){
input.add_float_val((float)atof(cur_vector.back()));
*(input.mutable_array_shape()) = array_shape;
return input;
}
array_shape.add_dim(cur_vector.size());
for(unsigned int tt = 0; tt < cur_vector.size(); ++tt)
{
input.add_float_val((float)atof(cur_vector[tt]));
}
*(input.mutable_array_shape()) = array_shape;

return input;

break;

case 3:
array_shape.add_dim(1);
if (num_elem == 1){
input.add_int_val((int)atoi(cur_vector.back()));
*(input.mutable_array_shape()) = array_shape;
return input;
}
array_shape.add_dim(cur_vector.size());
for(unsigned int tt = 0; tt < cur_vector.size(); ++tt)
{
input.add_int_val((int)atoi(cur_vector[tt]));
}
*(input.mutable_array_shape()) = array_shape;

return input;
break;

default:
break;
}

std::cerr << "type error\n";
return input;
}

::tensorflow::eas::ArrayProto get_proto_int_2(std::vector<int>& cur_vector){
::tensorflow::eas::ArrayShape array_shape;
::tensorflow::eas::ArrayDataType dtype_f =
::tensorflow::eas::ArrayDataType::DT_INT32;
int num_elem = (int)cur_vector.size();

array_shape.add_dim(1);
if((int)cur_vector.size() < 0){

array_shape.add_dim(1);
::tensorflow::eas::ArrayProto input;
input.add_int_val(1);
input.set_dtype(dtype_f);
*(input.mutable_array_shape()) = array_shape;

return input;
}
array_shape.add_dim((int)cur_vector.size());
::tensorflow::eas::ArrayProto input;
for(int tt = 0; tt < (int)cur_vector.size(); ++tt)
{
input.add_int_val((int)cur_vector[tt]);
}
input.set_dtype(dtype_f);
*(input.mutable_array_shape()) = array_shape;

return input;

}
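
The new get_proto_cc above folds the four type- and rank-specific builders into one function that takes the raw string tokens plus the target dtype; its switch relies on the numeric values of the ArrayDataType enum (assuming it follows TensorFlow's DataType numbering, DT_FLOAT = 1 and DT_INT32 = 3) and emits shape [1] for a single token, [1, n] otherwise. A usage sketch under those assumptions, with illustrative tokens; these statements would sit inside main after the input vectors are filled:

// illustrative only: build protos from already-tokenised C strings
std::vector<char*> one_uid   = { (char*)"12345" };
std::vector<char*> mask_vals = { (char*)"1.0", (char*)"1.0", (char*)"0.0" };

::tensorflow::eas::ArrayProto uid_proto =
    get_proto_cc(one_uid, ::tensorflow::eas::ArrayDataType::DT_INT32);   // case 3, shape [1]
::tensorflow::eas::ArrayProto mask_proto =
    get_proto_cc(mask_vals, ::tensorflow::eas::ArrayDataType::DT_FLOAT); // case 1, shape [1, 3]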


int main(int argc, char** argv) {
@@ -134,14 +99,14 @@ int main(int argc, char** argv) {
int cur_type = 0;

// vector variables
std::vector<int> cur_uids;
std::vector<int> cur_mids;
std::vector<int> cur_cats;
std::vector<int> cur_sl; // single
std::vector<int> cur_mid_his;
std::vector<int> cur_cat_his;
std::vector<float> cur_mid_mask;
std::vector<float> cur_target; // multiple
std::vector<char*> cur_uids;
std::vector<char*> cur_mids;
std::vector<char*> cur_cats;
std::vector<char*> cur_sl; // single
std::vector<char*> cur_mid_his;
std::vector<char*> cur_cat_his;
std::vector<char*> cur_mid_mask;
std::vector<char*> cur_target; // multiple

// temp pointers
std::vector<char*> temp_ptrs;
@@ -184,56 +149,56 @@ int main(int argc, char** argv) {

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_uids.push_back((int) atoi(temp_ptrs.back()));
cur_uids.push_back(temp_ptrs.back());
break;

case 1:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_mids.push_back((int) atoi(temp_ptrs.back()));
cur_mids.push_back(temp_ptrs.back());
break;

case 2:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_cats.push_back((int) atoi(temp_ptrs.back()));
cur_cats.push_back(temp_ptrs.back());
break;

case 3:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_mid_his.push_back((int) atoi(temp_ptrs.back()));
cur_mid_his.push_back(temp_ptrs.back());
break;

case 4:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_cat_his.push_back((int) atoi(temp_ptrs.back()));
cur_cat_his.push_back(temp_ptrs.back());
break;

case 5:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_mid_mask.push_back((float) atof(temp_ptrs.back()));
cur_mid_mask.push_back(temp_ptrs.back());
break;

case 6:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_target.push_back((float) atof(temp_ptrs.back()));
cur_target.push_back(temp_ptrs.back());
break;

case 7:

temp_ptrs.push_back((char*) malloc(sizeof(char)*strlen(record)));
strcpy(temp_ptrs.back(),record);
cur_sl.push_back((int) atoi(temp_ptrs.back()));
cur_sl.push_back(temp_ptrs.back());
break;

default:
@@ -244,18 +209,20 @@ int main(int argc, char** argv) {

}
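
One caveat in the record-parsing switch above (pre-existing context lines, not touched by this commit): malloc(sizeof(char)*strlen(record)) reserves exactly strlen(record) bytes, while strcpy also writes the terminating '\0', so each copy overruns its buffer by one byte. A safer form of the copy step, offered as a suggestion rather than as code from the commit:

// allocate strlen(record) + 1 bytes so strcpy's trailing '\0' fits
temp_ptrs.push_back((char*) malloc(strlen(record) + 1));
strcpy(temp_ptrs.back(), record);
// strdup(record) would be an equivalent one-liner on POSIX systems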

// // ---------------------------------------prepare request--------------------------------------


::tensorflow::eas::ArrayDataType dtype_i =
::tensorflow::eas::ArrayDataType::DT_INT32;
::tensorflow::eas::ArrayDataType dtype_f =
::tensorflow::eas::ArrayDataType::DT_FLOAT;
// get all inputs
::tensorflow::eas::ArrayProto proto_uids = get_proto_int_1(cur_uids); // -1
::tensorflow::eas::ArrayProto proto_mids = get_proto_int_1(cur_mids); // -1
::tensorflow::eas::ArrayProto proto_cats = get_proto_int_1(cur_cats); // -1
::tensorflow::eas::ArrayProto proto_mid_his = get_proto_int_2(cur_mid_his); // -1 -1
::tensorflow::eas::ArrayProto proto_cat_his = get_proto_int_2(cur_cat_his); // -1 -1
::tensorflow::eas::ArrayProto proto_mid_mask= get_proto_float_2(cur_mid_mask); //float // -1 -1
::tensorflow::eas::ArrayProto proto_target = get_proto_float_2(cur_target); //float // -1 -1
::tensorflow::eas::ArrayProto proto_sl = get_proto_int_1(cur_sl); // -1
::tensorflow::eas::ArrayProto proto_uids = get_proto_cc(cur_uids,dtype_i); // -1
::tensorflow::eas::ArrayProto proto_mids = get_proto_cc(cur_mids,dtype_i); // -1
::tensorflow::eas::ArrayProto proto_cats = get_proto_cc(cur_cats,dtype_i); // -1
::tensorflow::eas::ArrayProto proto_mid_his = get_proto_cc(cur_mid_his,dtype_i); // -1 -1
::tensorflow::eas::ArrayProto proto_cat_his = get_proto_cc(cur_cat_his,dtype_i); // -1 -1
::tensorflow::eas::ArrayProto proto_mid_mask= get_proto_cc(cur_mid_mask,dtype_f); //float // -1 -1
::tensorflow::eas::ArrayProto proto_target = get_proto_cc(cur_target,dtype_f); //float // -1 -1
::tensorflow::eas::ArrayProto proto_sl = get_proto_cc(cur_sl,dtype_i); // -1


// setup request
@@ -276,7 +243,7 @@ int main(int argc, char** argv) {
void *buffer1 = malloc(size);
req.SerializeToArray(buffer1, size);

// // -------------------------------------process and get feedback-----------------------------------------
// ----------------------------------------------process and get feedback---------------------------------------------------
void* output = nullptr;
int output_size = 0;
state = process(model, buffer1, size, &output, &output_size);
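
Assuming the processor hands back a serialized protobuf response through output (the concrete message type, written here as ::tensorflow::eas::PredictResponse, is not visible in this diff and is an assumption), decoding would look roughly like:

// assumption: output holds a serialized response once process() returns
::tensorflow::eas::PredictResponse resp;
if (output != nullptr && resp.ParseFromArray(output, output_size)) {
  // the returned tensors would be inspected here
} else {
  std::cerr << "failed to parse the prediction response\n";
}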