Feat: add dipole consistency test #3321

Merged · 37 commits · Feb 23, 2024

Changes shown from 31 commits.

Commits (37)
e9dcf0f  feat: add dipole consistency test (anyangml, Feb 22, 2024)
fb7447c  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
2ad5db7  fix: add serialize, deserialize (anyangml, Feb 22, 2024)
3f38853  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
5e78ce3  fix: UTs (anyangml, Feb 22, 2024)
e857afe  fix: UTs (anyangml, Feb 22, 2024)
cecdb8c  fix: UTs (anyangml, Feb 22, 2024)
501c534  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
a3d4b00  fix: UTs (anyangml, Feb 22, 2024)
c5fd81b  fix: UTs (anyangml, Feb 22, 2024)
eee3048  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
6da24c0  fix: UTs (anyangml, Feb 22, 2024)
e9d64cf  fix: UTs (anyangml, Feb 22, 2024)
8694af5  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
58543e5  fix: UTs (anyangml, Feb 22, 2024)
0c0f28f  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
36188a8  fix: UTs (anyangml, Feb 22, 2024)
b453471  chore: move atom_ener to energyfitting (anyangml, Feb 22, 2024)
13df1db  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
6052f7e  fix: UTs (anyangml, Feb 22, 2024)
511e15e  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
15b7425  fix: UTs (anyangml, Feb 22, 2024)
737cf37  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 22, 2024)
186a15e  fix: UTs (anyangml, Feb 23, 2024)
0a84f33  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
abad3dd  chore: refactor (anyangml, Feb 23, 2024)
3e0f3d8  fix: se_r, se_t (anyangml, Feb 23, 2024)
d2cbe46  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
8bbaf25  chore: refactor (anyangml, Feb 23, 2024)
88d81a0  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
4c20c10  chore: typo (anyangml, Feb 23, 2024)
f538039  Merge branch 'devel' into devel (anyangml, Feb 23, 2024)
3e2d1f9  chore: refactor (anyangml, Feb 23, 2024)
9762912  chore: typo (anyangml, Feb 23, 2024)
344910a  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 23, 2024)
9fe7bbd  Merge branch 'devel' into devel (anyangml, Feb 23, 2024)
9ac39b8  Merge branch 'devel' into devel (anyangml, Feb 23, 2024)
deepmd/dpmodel/fitting/dipole_fitting.py (8 changes: 2 additions & 6 deletions)

```diff
@@ -53,8 +53,6 @@ class DipoleFitting(GeneralFitting):
         If the weights of fitting net are trainable.
         Suppose that we have :math:`N_l` hidden layers in the fitting net,
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
-    atom_ener
-        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
         The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
     precision
@@ -91,7 +89,6 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[Optional[float]]] = None,
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -102,6 +99,8 @@
         r_differentiable: bool = True,
         c_differentiable: bool = True,
         old_impl=False,
+        # not used
+        seed: Optional[int] = None,
     ):
         # seed, uniform_seed are not included
         if tot_ener_zero:
@@ -112,8 +111,6 @@
             raise NotImplementedError("use_aparam_as_mask is not implemented")
         if layer_name is not None:
             raise NotImplementedError("layer_name is not implemented")
-        if atom_ener is not None and atom_ener != []:
-            raise NotImplementedError("atom_ener is not implemented")
 
         self.embedding_width = embedding_width
         self.r_differentiable = r_differentiable
@@ -129,7 +126,6 @@
             rcond=rcond,
             tot_ener_zero=tot_ener_zero,
             trainable=trainable,
-            atom_ener=atom_ener,
             activation_function=activation_function,
             precision=precision,
             layer_name=layer_name,
```
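The ignored `seed` keyword keeps the native-backend constructor signature compatible with callers that always pass a seed, for instance when rebuilding a fitting from a dict serialized by another backend. A minimal sketch of the intent; every constructor value besides `seed` is an illustrative assumption, not taken from this diff:

```python
# Hedged sketch: `seed` is accepted for cross-backend signature
# compatibility but has no effect in the NumPy reference implementation.
from deepmd.dpmodel.fitting.dipole_fitting import DipoleFitting

fitting = DipoleFitting(
    var_name="dipole",
    ntypes=2,
    dim_descrpt=64,       # descriptor output width (assumed value)
    embedding_width=8,    # descriptor rotation-matrix width (assumed value)
    neuron=[120, 120, 120],
    seed=42,              # accepted, silently unused in this backend
)
```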
deepmd/dpmodel/fitting/general_fitting.py (5 changes: 0 additions & 5 deletions)

```diff
@@ -55,8 +55,6 @@ class GeneralFitting(NativeOP, BaseFitting):
         If the weights of fitting net are trainable.
         Suppose that we have :math:`N_l` hidden layers in the fitting net,
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
-    atom_ener
-        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
         The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
     precision
@@ -87,7 +85,6 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[float]] = None,
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -110,7 +107,6 @@
             self.trainable = [True for ii in range(len(self.neuron) + 1)]
         if isinstance(self.trainable, bool):
             self.trainable = [self.trainable] * (len(self.neuron) + 1)
-        self.atom_ener = atom_ener
         self.activation_function = activation_function
         self.precision = precision
         self.layer_name = layer_name
@@ -236,7 +232,6 @@ def serialize(self) -> dict:
             # not supported
             "tot_ener_zero": self.tot_ener_zero,
             "trainable": self.trainable,
-            "atom_ener": self.atom_ener,
             "layer_name": self.layer_name,
             "use_aparam_as_mask": self.use_aparam_as_mask,
             "spin": self.spin,
```
deepmd/dpmodel/fitting/invar_fitting.py (5 changes: 3 additions & 2 deletions)

```diff
@@ -115,7 +115,7 @@
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[float]] = None,
+        atom_ener: Optional[List[float]] = [],
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -139,6 +139,7 @@
             raise NotImplementedError("atom_ener is not implemented")
 
         self.dim_out = dim_out
+        self.atom_ener = atom_ener
         super().__init__(
             var_name=var_name,
             ntypes=ntypes,
@@ -150,7+151,6 @@
             rcond=rcond,
             tot_ener_zero=tot_ener_zero,
             trainable=trainable,
-            atom_ener=atom_ener,
             activation_function=activation_function,
             precision=precision,
             layer_name=layer_name,
@@ -163,6 +163,7 @@
     def serialize(self) -> dict:
         data = super().serialize()
         data["dim_out"] = self.dim_out
+        data["atom_ener"] = self.atom_ener
         return data
 
     def _net_out_dim(self):
```

Codecov: added lines 142 and 166 in deepmd/dpmodel/fitting/invar_fitting.py were not covered by tests.
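With `atom_ener` moved off the shared base class, it now round-trips through `InvarFitting.serialize()` instead of `GeneralFitting.serialize()`. A hedged sketch of the new ownership, assuming the usual serialize/deserialize pair on these classes; constructor values are illustrative:

```python
# Sketch: atom_ener is stored and serialized by InvarFitting itself,
# not by GeneralFitting, after this diff.
from deepmd.dpmodel.fitting.invar_fitting import InvarFitting

fitting = InvarFitting(
    var_name="energy",
    ntypes=2,
    dim_descrpt=64,   # assumed descriptor width
    dim_out=1,
    neuron=[120, 120, 120],
    atom_ener=[],     # default changed from None to []
)
data = fitting.serialize()
assert "atom_ener" in data          # written by InvarFitting.serialize
restored = InvarFitting.deserialize(data)
```

One side note on the changed default: a mutable default argument (`[]`) is shared across calls in Python, which is harmless only as long as the list is never mutated in place.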
deepmd/dpmodel/fitting/polarizability_fitting.py (6 changes: 0 additions & 6 deletions)

```diff
@@ -56,8 +56,6 @@ class PolarFitting(GeneralFitting):
         If the weights of fitting net are trainable.
         Suppose that we have :math:`N_l` hidden layers in the fitting net,
         this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable.
-    atom_ener
-        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
     activation_function
         The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN|
     precision
@@ -93,7 +91,6 @@ def __init__(
         rcond: Optional[float] = None,
         tot_ener_zero: bool = False,
         trainable: Optional[List[bool]] = None,
-        atom_ener: Optional[List[Optional[float]]] = None,
         activation_function: str = "tanh",
         precision: str = DEFAULT_PRECISION,
         layer_name: Optional[List[Optional[str]]] = None,
@@ -115,8 +112,6 @@
             raise NotImplementedError("use_aparam_as_mask is not implemented")
         if layer_name is not None:
             raise NotImplementedError("layer_name is not implemented")
-        if atom_ener is not None and atom_ener != []:
-            raise NotImplementedError("atom_ener is not implemented")
 
         self.embedding_width = embedding_width
         self.fit_diag = fit_diag
@@ -142,7 +137,6 @@
             rcond=rcond,
             tot_ener_zero=tot_ener_zero,
             trainable=trainable,
-            atom_ener=atom_ener,
             activation_function=activation_function,
             precision=precision,
             layer_name=layer_name,
```
deepmd/pt/model/task/ener.py (5 changes: 5 additions & 0 deletions)

```diff
@@ -78,6 +78,8 @@
         Random seed.
     exclude_types: List[int]
         Atomic contributions of the excluded atom types are set zero.
+    atom_ener
+        Specifying atomic energy contribution in vacuum. The `set_davg_zero` key in the descrptor should be set.
 
     """
 
@@ -98,9 +100,11 @@
         rcond: Optional[float] = None,
         seed: Optional[int] = None,
         exclude_types: List[int] = [],
+        atom_ener: Optional[List[float]] = None,
         **kwargs,
     ):
         self.dim_out = dim_out
+        self.atom_ener = atom_ener
         super().__init__(
             var_name=var_name,
             ntypes=ntypes,
@@ -126,6 +130,7 @@
     def serialize(self) -> dict:
         data = super().serialize()
         data["dim_out"] = self.dim_out
+        data["atom_ener"] = self.atom_ener
         return data
 
     @property
```

Codecov: added lines 107 and 133 in deepmd/pt/model/task/ener.py were not covered by tests.
deepmd/pt/model/task/fitting.py (1 change: 0 additions & 1 deletion)

```diff
@@ -428,7 +428,6 @@ def serialize(self) -> dict:
             ## NOTICE: not supported by far
             "tot_ener_zero": False,
             "trainable": [True] * (len(self.neuron) + 1),
-            "atom_ener": [],
             "layer_name": None,
             "use_aparam_as_mask": False,
             "spin": None,
```
deepmd/tf/descriptor/se_r.py (4 changes: 4 additions & 0 deletions)

```diff
@@ -204,6 +204,10 @@
         """Returns the output dimension of this descriptor."""
         return self.filter_neuron[-1]
 
+    def get_dim_rot_mat_1(self) -> int:
+        """To accommodate dipole fitting, which needs embedding_width."""
+        return self.get_dim_out()
+
     def get_nlist(self):
         """Returns neighbor information.
 
```

Codecov: added line 209 in deepmd/tf/descriptor/se_r.py was not covered by tests.
deepmd/tf/descriptor/se_t.py (4 changes: 4 additions & 0 deletions)

```diff
@@ -194,6 +194,10 @@
         """Returns the output dimension of this descriptor."""
         return self.filter_neuron[-1]
 
+    def get_dim_rot_mat_1(self) -> int:
+        """To accommodate dipole fitting, which needs embedding_width."""
+        return self.get_dim_out()
+
     def get_nlist(self) -> Tuple[tf.Tensor, tf.Tensor, List[int], List[int]]:
         """Returns neighbor information.
 
```

Codecov: added line 199 in deepmd/tf/descriptor/se_t.py was not covered by tests.
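For context on why radial (se_r) and three-body (se_t) descriptors suddenly need `get_dim_rot_mat_1`: the dipole fitting net emits one coefficient per rotation-matrix row and contracts them into a per-atom vector (note `self.dim_rot_mat = self.dim_rot_mat_1 * 3` in the dipole diff below), so the fitting must know the rotation-matrix width M1 (`embedding_width`) at construction time. A schematic of that contraction, with shapes assumed from the se_a convention rather than taken from this diff:

```python
# Schematic of the dipole contraction that makes embedding_width necessary.
import numpy as np

natoms, m1 = 4, 8                          # assumed sizes
rot_mat = np.random.rand(natoms, m1, 3)    # equivariant descriptor part
coeff = np.random.rand(natoms, m1)         # fitting-net output, out_dim = m1
dipole = np.einsum("am,amd->ad", coeff, rot_mat)   # shape (natoms, 3)
print(dipole.shape)  # (4, 3)
```

se_r and se_t do not actually build a rotation matrix; falling back to `get_dim_out()` apparently just gives them a well-defined `embedding_width` so the shared model-construction path does not fail.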
deepmd/tf/fit/dipole.py (77 changes: 71 additions & 6 deletions)

```diff
@@ -38,8 +38,12 @@
 
     Parameters
     ----------
-    descrpt : tf.Tensor
-        The descrptor
+    ntypes
+        The ntypes of the descrptor :math:`\mathcal{D}`
+    dim_descrpt
+        The dimension of the descrptor :math:`\mathcal{D}`
+    embedding_width
+        The rotation matrix dimension of the descrptor :math:`\mathcal{D}`
     neuron : List[int]
         Number of neurons in each hidden layer of the fitting net
     resnet_dt : bool
@@ -59,7 +63,9 @@
 
     def __init__(
         self,
-        descrpt: tf.Tensor,
+        ntypes: int,
+        dim_descrpt: int,
+        embedding_width: int,
         neuron: List[int] = [120, 120, 120],
         resnet_dt: bool = True,
         sel_type: Optional[List[int]] = None,
@@ -70,8 +76,8 @@
         **kwargs,
     ) -> None:
         """Constructor."""
-        self.ntypes = descrpt.get_ntypes()
-        self.dim_descrpt = descrpt.get_dim_out()
+        self.ntypes = ntypes
+        self.dim_descrpt = dim_descrpt
         self.n_neuron = neuron
         self.resnet_dt = resnet_dt
         self.sel_type = sel_type
@@ -83,9 +89,10 @@
         self.seed = seed
         self.uniform_seed = uniform_seed
         self.seed_shift = one_layer_rand_seed_shift()
+        self.activation_function_name = activation_function
         self.fitting_activation_fn = get_activation_func(activation_function)
         self.fitting_precision = get_precision(precision)
-        self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1()
+        self.dim_rot_mat_1 = embedding_width
         self.dim_rot_mat = self.dim_rot_mat_1 * 3
         self.useBN = False
         self.fitting_net_variables = None
@@ -327,3 +334,61 @@
             tensor_size=3,
             label_name="dipole",
         )
+
+    def serialize(self, suffix: str) -> dict:
+        """Serialize the model.
+
+        Returns
+        -------
+        dict
+            The serialized data
+        """
+        data = {
+            "var_name": "dipole",
+            "ntypes": self.ntypes,
+            "dim_descrpt": self.dim_descrpt,
+            "embedding_width": self.dim_rot_mat_1,
+            # very bad design: type embedding is not passed to the class
+            # TODO: refactor the class
+            "mixed_types": False,
+            "dim_out": 3,
+            "neuron": self.n_neuron,
+            "resnet_dt": self.resnet_dt,
+            "activation_function": self.activation_function_name,
+            "precision": self.fitting_precision.name,
+            "exclude_types": [],
+            "nets": self.serialize_network(
+                ntypes=self.ntypes,
+                # TODO: consider type embeddings
+                ndim=1,
+                in_dim=self.dim_descrpt,
+                out_dim=self.dim_rot_mat_1,
+                neuron=self.n_neuron,
+                activation_function=self.activation_function_name,
+                resnet_dt=self.resnet_dt,
+                variables=self.fitting_net_variables,
+                suffix=suffix,
+            ),
+        }
+        return data
+
+    @classmethod
+    def deserialize(cls, data: dict, suffix: str):
+        """Deserialize the model.
+
+        Parameters
+        ----------
+        data : dict
+            The serialized data
+
+        Returns
+        -------
+        Model
+            The deserialized model
+        """
+        fitting = cls(**data)
+        fitting.fitting_net_variables = cls.deserialize_network(
+            data["nets"],
+            suffix=suffix,
+        )
+        return fitting
```

Codecov: added lines 79-80, 92, 95, 346, 373, 389-390, and 394 in deepmd/tf/fit/dipole.py were not covered by tests. (A review conversation on this file was marked as resolved.)
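These `serialize`/`deserialize` methods are what the new cross-backend dipole consistency test exercises: the TF fitting is flattened to a plain dict, including its network variables, and rebuilt from it. A hedged round-trip sketch, assuming the class in this file is `DipoleFittingSeA` and that the variables have already been populated by a prior `build` call:

```python
# Hedged sketch of the round trip enabled by this diff.
from deepmd.tf.fit.dipole import DipoleFittingSeA  # assumed class name

fit = DipoleFittingSeA(ntypes=2, dim_descrpt=64, embedding_width=8)
# ... fit.build(...) must run first so fitting_net_variables is populated ...
data = fit.serialize(suffix="")
same = DipoleFittingSeA.deserialize(data, suffix="")
assert same.dim_rot_mat_1 == fit.dim_rot_mat_1
```

Note that `deserialize` calls `cls(**data)`, so keys such as `var_name`, `mixed_types`, `dim_out`, and `nets` that are not constructor parameters are absorbed by the `**kwargs` catch-all in `__init__`.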
deepmd/tf/fit/ener.py (6 changes: 4 additions & 2 deletions)

```diff
@@ -95,8 +95,10 @@ class EnerFitting(Fitting):
 
     Parameters
     ----------
-    descrpt
-        The descrptor :math:`\mathcal{D}`
+    ntypes
+        The ntypes of the descrptor :math:`\mathcal{D}`
+    dim_descrpt
+        The dimension of the descrptor :math:`\mathcal{D}`
     neuron
         Number of neurons :math:`N` in each hidden layer of the fitting net
     resnet_dt
```
deepmd/tf/fit/fitting.py (6 changes: 5 additions & 1 deletion)

```diff
@@ -6,6 +6,7 @@
 from typing import (
     Callable,
     List,
+    Optional,
     Type,
 )
 
@@ -175,6 +176,7 @@ def serialize_network(
         activation_function: str,
         resnet_dt: bool,
         variables: dict,
+        out_dim: Optional[int] = 1,
         suffix: str = "",
     ) -> dict:
         """Serialize network.
@@ -197,6 +199,8 @@
             The input variables
         suffix : str, optional
             The suffix of the scope
+        out_dim : int, optional
+            The output dimension
 
         Returns
         -------
@@ -231,7 +235,7 @@
                 # initialize the network if it is not initialized
                 fittings[network_idx] = FittingNet(
                     in_dim=in_dim,
-                    out_dim=1,
+                    out_dim=out_dim,
                     neuron=neuron,
                     activation_function=activation_function,
                     resnet_dt=resnet_dt,
```

(A review conversation on the `out_dim=out_dim` line was marked as resolved.)
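The new `out_dim` parameter is what lets the shared `serialize_network` helper describe more than scalar heads: energy fitting keeps the default `out_dim=1`, while dipole fitting passes `out_dim=embedding_width` so each atom gets M1 coefficients. A sketch of the two call patterns as they would appear inside a fitting class's `serialize` method; it is a fragment, not standalone code, and all values are illustrative:

```python
# Sketch: one helper, two head widths (runs inside a Fitting subclass).
energy_nets = self.serialize_network(
    ntypes=2, ndim=1, in_dim=64,
    neuron=[120, 120, 120],
    activation_function="tanh", resnet_dt=True,
    variables=self.fitting_net_variables,
    # out_dim defaults to 1: one scalar energy per atom
)
dipole_nets = self.serialize_network(
    ntypes=2, ndim=1, in_dim=64,
    out_dim=8,  # embedding_width: M1 coefficients per atom
    neuron=[120, 120, 120],
    activation_function="tanh", resnet_dt=True,
    variables=self.fitting_net_variables,
)
```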
deepmd/tf/model/model.py (1 change: 1 addition & 0 deletions)

```diff
@@ -693,6 +693,7 @@ def __init__(
                 spin=self.spin,
                 ntypes=self.descrpt.get_ntypes(),
                 dim_descrpt=self.descrpt.get_dim_out(),
+                embedding_width=self.descrpt.get_dim_rot_mat_1(),
             )
         self.rcut = self.descrpt.get_rcut()
         self.ntypes = self.descrpt.get_ntypes()
```

(A review conversation on the added line was marked as resolved.)
deepmd/tf/model/multi.py (1 change: 1 addition & 0 deletions)

```diff
@@ -139,6 +139,7 @@ def __init__(
             spin=self.spin,
             ntypes=self.descrpt.get_ntypes(),
             dim_descrpt=self.descrpt.get_dim_out(),
+            embedding_width=self.descrpt.get_dim_rot_mat_1(),
         )
 
         # type embedding
```