
Commit

Third modified version according to the comments
root committed Sep 15, 2022
1 parent 4771c75 commit 98a68c9
Showing 8 changed files with 35 additions and 48 deletions.
8 changes: 8 additions & 0 deletions deepmd/entrypoints/main.py
@@ -140,6 +140,14 @@ def main_parser() -> argparse.ArgumentParser:
type=str,
help="the model after passing parameters",
)
parser_transfer.add_argument(
"-a",
"--ascend-graph",
default="",
type=str,
help="the model with constant natoms input, which is onle used for Ascend platform",
)


# * config parser ******************************************************************
parser_train = subparsers.add_parser(
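For readers following the flow end to end: argparse converts --ascend-graph into an ascend_graph attribute, which is exactly the keyword that transfer() reads in deepmd/entrypoints/transfer.py below. A minimal, self-contained sketch of that hand-off (the transfer stub and the extra options are illustrative, not deepmd's real entry point):

import argparse

def transfer(*, old_model, raw_model, output, ascend_graph="", **kwargs):
    # stub standing in for deepmd.entrypoints.transfer.transfer
    print("const graph target:", ascend_graph or "<not requested>")

parser = argparse.ArgumentParser()
parser.add_argument("-a", "--ascend-graph", default="", type=str,
                    help="the model with constant natoms input, only used on the Ascend platform")
parser.add_argument("--old-model", default="old.pb")
parser.add_argument("--raw-model", default="raw.pb")
parser.add_argument("--output", default="model.pb")
args = parser.parse_args(["-a", "model_const.pb"])

# vars(args) maps --ascend-graph to the key "ascend_graph",
# so the parsed namespace can be splatted straight into transfer(**...)
transfer(**vars(args))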
12 changes: 5 additions & 7 deletions deepmd/entrypoints/transfer.py
@@ -69,15 +69,11 @@ def transfer(*, old_model: str, raw_model: str, output: str, **kwargs):
f.write(new_graph_def.SerializeToString())
log.info("the output model is saved in " + output)
dp_float_prec = os.environ.get("DP_INTERFACE_PREC", "high").lower()
if dp_float_prec == "ascend_mix":
if kwargs['ascend_graph']:
const_graph_def = modify_const_op(new_graph_def)
if output.endswith(".pb"):
const_out = output[:-3] + "_const.pb"
else:
const_out = output + "_const"
with tf.gfile.GFile(const_out, mode="wb") as f:
with tf.gfile.GFile(kwargs['ascend_graph'], mode="wb") as f:
f.write(const_graph_def.SerializeToString())
log.info("the dp test model is saved in " + const_out)
log.info("the dp test model is saved in " + kwargs['ascend_graph'])


def load_graph(graph_name: str) -> tf.Graph:
@@ -108,6 +104,8 @@ def modify_const_op(new_graph_def: tf.Graph) -> tf.Graph:
new_graph : tf.Graph
    original new graph

Returns
-------
tf.Graph
    graph with natoms converted to a const op for the Ascend platform
"""
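The net effect of this hunk: the const-graph file name is no longer derived inside transfer(); the caller passes the target path in, and the extra graph is only written when that path is non-empty. A condensed sketch of the resulting save logic, with the surrounding function body abbreviated (modify_const_op is the deepmd helper documented above; the _save_graphs wrapper name is made up for the sketch):

import logging

from deepmd.env import tf
from deepmd.entrypoints.transfer import modify_const_op

log = logging.getLogger(__name__)

def _save_graphs(new_graph_def, output, ascend_graph=""):
    # always write the transferred model itself
    with tf.gfile.GFile(output, mode="wb") as f:
        f.write(new_graph_def.SerializeToString())
    log.info("the output model is saved in " + output)
    # optionally write the Ascend variant with natoms folded into a const op
    if ascend_graph:
        const_graph_def = modify_const_op(new_graph_def)
        with tf.gfile.GFile(ascend_graph, mode="wb") as f:
            f.write(const_graph_def.SerializeToString())
        log.info("the dp test model is saved in " + ascend_graph)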
7 changes: 0 additions & 7 deletions deepmd/env.py
@@ -357,7 +357,6 @@ def _get_package_constants(

# FLOAT_PREC
dp_float_prec = os.environ.get("DP_INTERFACE_PREC", "high").lower()
GLOBAL_ASCEND_OUT_PRECISION = None
if dp_float_prec in ("high", ""):
# default is high
GLOBAL_TF_FLOAT_PRECISION = tf.float64
@@ -369,12 +368,6 @@
GLOBAL_NP_FLOAT_PRECISION = np.float32
GLOBAL_ENER_FLOAT_PRECISION = np.float64
global_float_prec = "float"
elif dp_float_prec == "ascend_mix":
GLOBAL_TF_FLOAT_PRECISION = tf.float32
GLOBAL_NP_FLOAT_PRECISION = np.float32
GLOBAL_ENER_FLOAT_PRECISION = np.float64
GLOBAL_ASCEND_OUT_PRECISION = np.float32
global_float_prec = "float"
else:
raise RuntimeError(
"Unsupported float precision option: %s. Supported: high,"
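With the ascend_mix branch removed, DP_INTERFACE_PREC is back to choosing only between the stock "high" and "low" modes, and the reduced output precision for Ascend is now requested per layer instead (see the network.py change below). A sketch of the selection logic that remains; the "low" condition and the assignments hidden in the collapsed part of the hunk are reconstructed, so treat them as assumed rather than quoted:

import os
import numpy as np
import tensorflow.compat.v1 as tf  # deepmd wraps this as deepmd.env.tf

dp_float_prec = os.environ.get("DP_INTERFACE_PREC", "high").lower()
if dp_float_prec in ("high", ""):
    # default: double precision everywhere
    GLOBAL_TF_FLOAT_PRECISION = tf.float64
    GLOBAL_NP_FLOAT_PRECISION = np.float64
    GLOBAL_ENER_FLOAT_PRECISION = np.float64
    global_float_prec = "double"
elif dp_float_prec == "low":
    # single precision, but keep the energy accumulator in double
    GLOBAL_TF_FLOAT_PRECISION = tf.float32
    GLOBAL_NP_FLOAT_PRECISION = np.float32
    GLOBAL_ENER_FLOAT_PRECISION = np.float64
    global_float_prec = "float"
else:
    raise RuntimeError(
        "Unsupported float precision option: %s. Supported: high, low"
        % dp_float_prec
    )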
14 changes: 7 additions & 7 deletions deepmd/utils/network.py
@@ -1,7 +1,7 @@
import numpy as np

from deepmd.env import tf
from deepmd.env import GLOBAL_TF_FLOAT_PRECISION, GLOBAL_ASCEND_OUT_PRECISION
from deepmd.env import GLOBAL_TF_FLOAT_PRECISION
from deepmd.common import get_precision

def one_layer_rand_seed_shift():
@@ -11,6 +11,7 @@ def one_layer(inputs,
outputs_size,
activation_fn=tf.nn.tanh,
precision = GLOBAL_TF_FLOAT_PRECISION,
out_precision = GLOBAL_TF_FLOAT_PRECISION,
stddev=1.0,
bavg=0.0,
name='linear',
@@ -45,28 +46,27 @@ def one_layer(inputs,
w_initializer,
trainable = trainable)
variable_summaries(w, 'matrix')
if final_layer and GLOBAL_ASCEND_OUT_PRECISION is not None:
if final_layer:
b = tf.get_variable('bias',
[outputs_size],
GLOBAL_ASCEND_OUT_PRECISION,
out_precision,
b_initializer,
trainable = trainable)
variable_summaries(b, 'bias')
else:
b = tf.get_variable('bias',
[outputs_size],
precision,
b_initializer,
trainable = trainable)
variable_summaries(b, 'bias')
variable_summaries(b, 'bias')

if mixed_prec is not None and not final_layer:
inputs = tf.cast(inputs, get_precision(mixed_prec['compute_prec']))
w = tf.cast(w, get_precision(mixed_prec['compute_prec']))
b = tf.cast(b, get_precision(mixed_prec['compute_prec']))

if final_layer and GLOBAL_ASCEND_OUT_PRECISION is not None:
hidden = tf.cast(tf.matmul(inputs, w), dtype=GLOBAL_ASCEND_OUT_PRECISION)
if final_layer:
hidden = tf.cast(tf.matmul(inputs, w), dtype=out_precision)
else:
hidden = tf.matmul(inputs, w)
hidden = tf.nn.bias_add(hidden, b)
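In place of the global GLOBAL_ASCEND_OUT_PRECISION switch, one_layer now takes an out_precision argument that only matters for the final layer: the last bias is created in that precision and the last matmul result is cast to it, while every other caller keeps the GLOBAL_TF_FLOAT_PRECISION default and sees no change. A trimmed sketch of just that final-layer path (the helper name _layer_output is made up; the real one_layer has many more parameters, and w plus b_initializer are built earlier in the function):

from deepmd.env import tf, GLOBAL_TF_FLOAT_PRECISION

def _layer_output(inputs, w, b_initializer, outputs_size,
                  precision=GLOBAL_TF_FLOAT_PRECISION,
                  out_precision=GLOBAL_TF_FLOAT_PRECISION,
                  final_layer=False, trainable=True):
    # the final layer's bias lives in the (possibly lower) output precision
    b = tf.get_variable('bias', [outputs_size],
                        out_precision if final_layer else precision,
                        b_initializer, trainable=trainable)
    if final_layer:
        # cast the matmul result so the layer's output dtype matches out_precision
        hidden = tf.cast(tf.matmul(inputs, w), dtype=out_precision)
    else:
        hidden = tf.matmul(inputs, w)
    return tf.nn.bias_add(hidden, b)

A fitting network targeting the Ascend test graph would then be expected to call one_layer(..., final_layer=True, out_precision=tf.float32) for its last layer; other call sites need no change.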
6 changes: 5 additions & 1 deletion deepmd/utils/transfer_to_ascend.py
@@ -130,7 +130,11 @@ def mix_precision(
# stage 3: transfer the mix-precision model
log.info("\n\n")
log.info("stage 3: transfer the mix-precision model")
transfer(old_model=input, raw_model=output, output=output)
if output.endswith(".pb"):
const_out = output[:-3] + "_const.pb"
else:
const_out = output + "_const"
transfer(old_model=input, raw_model=output, output=output, ascend_graph=const_out)

def _check_transfer_model_type(model_file):
try:
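The "_const" naming convention that previously lived inside transfer() now sits here, in the Ascend-specific pipeline, so a plain dp transfer run never produces the extra file. The rule itself, pulled out as a tiny helper for clarity (the function name is made up; the commit keeps this logic inline):

def const_graph_path(output: str) -> str:
    # "model.pb" -> "model_const.pb"; any other name just gets a "_const" suffix
    if output.endswith(".pb"):
        return output[:-3] + "_const.pb"
    return output + "_const"

assert const_graph_path("graph.pb") == "graph_const.pb"
assert const_graph_path("frozen_model") == "frozen_model_const"

transfer() is then called with ascend_graph set to this derived path, matching the hunk above.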
20 changes: 8 additions & 12 deletions source/api_cc/include/DeepPot.h
@@ -29,28 +29,24 @@ class DeepPot
* @param[in] file_content The content of the model file. If it is not empty, DP will read from the string instead of the file.
**/
void init (const std::string & model, const int & gpu_rank = 0, const std::string & file_content = "");
/**
* @brief Print the DP summary to the screen.
* @param[in] pre The prefix to each line.
**/
#if HUAWEI_ASCEND
DeepPot (const std::string & model, const int & nloc, const int & gpu_rank = 0);
/**
* @brief Initialize the DP.
* @param[in] model The name of the frozen model file.
* @param[in] gpu_rank The GPU rank. Default is 0.
* @brief Initialize the DP on the Ascend platform.
* @param[in] npu_rank The NPU rank. Default is 0.
**/
void init (const std::string & model, const int & nloc, const int & gpu_rank = 0);
void init (const int & npu_rank = 0);
/**
* @brief Print the DP summary to the screen.
* @param[in] pre The prefix to each line.
* @brief Initialize and modify the graph executed on the Ascend platform.
* @param[in] model The name of the frozen model file.
* @param[in] type_count The number of atoms of each type.
* @param[in] file_content The content of the model file. If it is not empty, DP will read from the string instead of the file.
**/
void init_graph (const std::string & model, const std::vector<int > & type_count, const std::string & file_content = "");
#endif //HUAWEI_ASCEND
/**
* @brief Print the DP summary to the screen.
* @param[in] pre The prefix to each line.
**/
#endif //HUAWEI_ASCEND
void print_summary(const std::string &pre) const;
public:
/**
14 changes: 1 addition & 13 deletions source/api_cc/src/DeepPot.cc
@@ -257,21 +257,9 @@ DeepPot (const std::string & model, const int & gpu_rank, const std::string & fi
init(model, gpu_rank, file_content);
}

#if HUAWEI_ASCEND
DeepPot::
DeepPot (const std::string & model, const int & nloc, const int & npu_rank)
: inited (false), init_nbor (false),
graph_def(new GraphDef())
{
init(model, nloc, npu_rank);
}
#endif //HUAWEI_ASCEND

DeepPot::~DeepPot() {
delete graph_def;
#if HUAWEI_ASCEND
delete session;
#endif //HUAWEI_ASCEND
}

void
@@ -335,7 +323,7 @@ init (const std::string & model, const int & gpu_rank, const std::string & file_
#if HUAWEI_ASCEND
void
DeepPot::
init (const std::string & model, const int & nloc, const int & npu_rank)
init (const int & npu_rank)
{
if (inited){
std::cerr << "WARNING: deepmd-kit should not be initialized twice, do nothing at the second call of initializer" << std::endl;
2 changes: 1 addition & 1 deletion source/lmp/pair_deepmd.cpp
@@ -845,7 +845,7 @@ void PairDeepMD::settings(int narg, char **arg)
type_count[type[ii]-1] ++;
}
deep_pot.init_graph (arg[0], type_count, get_file_content(arg[0]));
deep_pot.init (arg[0], nlocal, get_node_rank());
deep_pot.init (get_node_rank());
#else
deep_pot.init (arg[0], get_node_rank(), get_file_content(arg[0]));
#endif //HUAWEI_ASCEND
