fix the last 2 files
HydrogenSulfate committed Nov 1, 2024
1 parent d03702a commit c611955
Showing 2 changed files with 15 additions and 34 deletions.
42 changes: 12 additions & 30 deletions deepmd/pd/model/network/init.py
@@ -1,4 +1,16 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
+
+# Copyright (c) 2024 The PyTorch Authors. All rights reserved.
+#
+# This file includes source code from PyTorch of version v2.3.0, which is released under the BSD-3-Clause license.
+# For more information about PyTorch, visit https://pytorch.org/.
+
+
+# These no_grad_* functions are necessary as wrappers around the parts of these
+# functions that use `with paddle.no_grad()`. The JIT doesn't support context
+# managers, so these need to be implemented as builtins. Using these wrappers
+# lets us keep those builtins small and re-usable.
+
 from __future__ import (
     annotations,
 )
@@ -13,16 +25,7 @@

 PaddleGenerator = paddle.base.libpaddle.Generator

-# Copyright (c) 2024 The PyTorch Authors. All rights reserved.
-#
-# This file includes source code from PyTorch of version v2.3.0, which is released under the BSD-3-Clause license.
-# For more information about PyTorch, visit https://pytorch.org/.
-

-# These no_grad_* functions are necessary as wrappers around the parts of these
-# functions that use `with paddle.no_grad()`. The JIT doesn't support context
-# managers, so these need to be implemented as builtins. Using these wrappers
-# lets us keep those builtins small and re-usable.
 def _no_grad_uniform_(tensor: paddle.Tensor, a, b, generator=None):
     with paddle.no_grad():
         return tensor.uniform_(a, b)
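
The function just shown is the pattern the header comment describes: each public initializer delegates to a tiny builtin-style wrapper that owns the paddle.no_grad() context. A minimal sketch of a companion wrapper, assuming Paddle's in-place Tensor.fill_ API (illustrative only; the real _no_grad_fill_ in this file may differ in detail):

import paddle

def _no_grad_fill_(tensor: paddle.Tensor, val: float):
    # Keep the context manager inside this small reusable wrapper so that
    # JIT-compiled callers never have to handle `with` blocks themselves.
    with paddle.no_grad():
        return tensor.fill_(val)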
@@ -167,8 +170,6 @@ def _calculate_fan_in_and_fan_out(tensor, reverse=False):

     receptive_field_size = 1
     if tensor.ndim > 2:
-        # math.prod is not always available, accumulate the product manually
-        # we could use functools.reduce but that is not supported by TorchScript
         for s in tensor.shape[2:]:
             receptive_field_size *= s
     fan_in = num_input_fmaps * receptive_field_size
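
To make the fan computation above concrete, a worked sketch with a hypothetical conv-style weight of shape [out_channels, in_channels, kH, kW]:

import paddle

w = paddle.empty([64, 3, 5, 5])  # hypothetical 5x5 conv weight
num_input_fmaps = w.shape[1]  # 3
num_output_fmaps = w.shape[0]  # 64
receptive_field_size = 1
for s in w.shape[2:]:  # 5 * 5 = 25
    receptive_field_size *= s
fan_in = num_input_fmaps * receptive_field_size  # 3 * 25 = 75
fan_out = num_output_fmaps * receptive_field_size  # 64 * 25 = 1600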
@@ -227,10 +228,6 @@ def constant_(tensor: Tensor, val: float) -> Tensor:
     >>> w = paddle.empty(3, 5)
     >>> nn.init.constant_(w, 0.3)
     """
-    # if paddle.overrides.has_torch_function_variadic(tensor):
-    #     return paddle.overrides.handle_torch_function(
-    #         constant_, (tensor,), tensor=tensor, val=val
-    #     )
     return _no_grad_fill_(tensor, val)


@@ -255,10 +252,6 @@ def normal_(
     >>> w = paddle.empty(3, 5)
     >>> nn.init.normal_(w)
     """
-    # if paddle.overrides.has_torch_function_variadic(tensor):
-    #     return paddle.overrides.handle_torch_function(
-    #         normal_, (tensor,), tensor=tensor, mean=mean, std=std
-    #     )
     return _no_grad_normal_(tensor, mean, std, generator)


@@ -333,17 +326,6 @@ def kaiming_uniform_(
     >>> w = paddle.empty(3, 5)
     >>> nn.init.kaiming_uniform_(w, mode="fan_in", nonlinearity="relu")
     """
-    # if paddle.overrides.has_torch_function_variadic(tensor):
-    #     return paddle.overrides.handle_torch_function(
-    #         kaiming_uniform_,
-    #         (tensor,),
-    #         tensor=tensor,
-    #         a=a,
-    #         mode=mode,
-    #         nonlinearity=nonlinearity,
-    #         generator=generator,
-    #     )
-
     if 0 in tensor.shape:
         warnings.warn("Initializing zero-element tensors is a no-op")
         return tensor
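
Past the zero-element guard, Kaiming uniform initialization conventionally samples from U(-bound, bound) with bound = gain * sqrt(3 / fan). A standalone sketch of that arithmetic, assuming the standard leaky_relu gain formula (not code from this diff):

import math

def kaiming_uniform_bound(fan: int, a: float = 0.0) -> float:
    # gain for leaky_relu with negative slope `a`; a == 0 gives the relu
    # gain sqrt(2).
    gain = math.sqrt(2.0 / (1.0 + a * a))
    std = gain / math.sqrt(fan)
    # A uniform U(-b, b) has standard deviation b / sqrt(3).
    return math.sqrt(3.0) * std

print(kaiming_uniform_bound(75))  # fan_in from the example above, ~0.283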
7 changes: 3 additions & 4 deletions deepmd/pd/model/task/property.py
@@ -5,7 +5,7 @@
     Optional,
 )

-import torch
+import paddle

 from deepmd.dpmodel import (
     FittingOutputDef,
@@ -47,7 +47,7 @@ class PropertyFittingNet(InvarFitting):
         The dimension of outputs of fitting net.
     neuron : list[int]
         Number of neurons in each hidden layers of the fitting net.
-    bias_atom_p : torch.Tensor, optional
+    bias_atom_p : paddle.Tensor, optional
         Average property per atom for each element.
     intensive : bool, optional
         Whether the fitting property is intensive.
@@ -78,7 +78,7 @@ def __init__(
         dim_descrpt: int,
         task_dim: int = 1,
         neuron: list[int] = [128, 128, 128],
-        bias_atom_p: Optional[torch.Tensor] = None,
+        bias_atom_p: Optional[paddle.Tensor] = None,
         intensive: bool = False,
         bias_method: str = "normal",
         resnet_dt: bool = True,
@@ -147,5 +147,4 @@ def serialize(self) -> dict:

         return dd

-    # make jit happy with torch 2.0.0
     exclude_types: list[int]
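
The substance of this file's change is replacing the leftover torch import and type hints with their Paddle equivalents. A minimal sketch of the corrected annotation pattern, using a hypothetical helper rather than the real class internals:

from typing import Optional

import paddle

def init_bias(bias_atom_p: Optional[paddle.Tensor], ntypes: int, task_dim: int = 1) -> paddle.Tensor:
    # Default to zeros when no average property per atom is supplied.
    if bias_atom_p is None:
        bias_atom_p = paddle.zeros([ntypes, task_dim])
    return bias_atom_p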
