
Commit c9295ff

modified: dsm/dsm_torch.py
chiragnagpal committed Jan 30, 2021
1 parent 49a9580 · commit c9295ff
Showing 1 changed file with 5 additions and 138 deletions.
143 changes: 5 additions & 138 deletions dsm/dsm_torch.py
@@ -188,48 +188,15 @@ def __init__(self, inputdim, k, layers=None, dist='Weibull',
layers = []
self.layers = layers

- # if self.dist in ['Weibull']:
- #   self.act = nn.SELU()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['Normal']:
- #   self.act = nn.Identity()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['LogNormal']:
- #   self.act = nn.Tanh()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # else:
- #   raise NotImplementedError('Distribution: '+self.dist+' not implemented'+
- #                             ' yet.')

- self.embedding = create_representation(inputdim, layers, 'ReLU6')

if len(layers) == 0:
  lastdim = inputdim
else:
  lastdim = layers[-1]

- # self.gate = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(lastdim, k, bias=False)
- #   ) for r in range(self.risks)})

- # self.scaleg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(lastdim, k, bias=True)
- #   ) for r in range(self.risks)})
- self._init_dsm_layers(lastdim)

- # self.shapeg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(lastdim, k, bias=True)
- #   ) for r in range(self.risks)})
+ self.embedding = create_representation(inputdim, layers, 'ReLU6')

+ self._init_dsm_layers(lastdim)

def forward(self, x, risk='1'):
"""The forward function that is called when data is passed through DSM.
@@ -308,40 +275,6 @@ def __init__(self, inputdim, k, typ='LSTM', layers=1,

self._init_dsm_layers(hidden)

- # if self.dist in ['Weibull']:
- #   self.act = nn.SELU()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['Normal']:
- #   self.act = nn.Identity()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['LogNormal']:
- #   self.act = nn.Tanh()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # else:
- #   raise NotImplementedError('Distribution: '+self.dist+' not implemented'+
- #                             ' yet.')

- # self.gate = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=False)
- #   ) for r in range(self.risks)})

- # self.scaleg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=True)
- #   ) for r in range(self.risks)})

- # self.shapeg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=True)
- #   ) for r in range(self.risks)})

if self.typ == 'LSTM':
  self.embedding = nn.LSTM(inputdim, hidden, layers,
                           bias=False, batch_first=True)
@@ -353,6 +286,8 @@ def __init__(self, inputdim, k, typ='LSTM', layers=1,
self.embedding = nn.GRU(inputdim, hidden, layers,
                        bias=False, batch_first=True)

+
+
def forward(self, x, risk='1'):
"""The forward function that is called when data is passed through DSM.
@@ -376,7 +311,7 @@ def forward(self, x, risk='1'):
xrep = nn.ReLU6()(xrep)

dim = xrep.shape[0]

return(self.act(self.shapeg[risk](xrep))+self.shape[risk].expand(dim, -1),
       self.act(self.scaleg[risk](xrep))+self.scale[risk].expand(dim, -1),
       self.gate[risk](xrep)/self.temp)
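The return above combines a global, distribution-level parameter with an input-dependent correction: act(shapeg[risk](xrep)) is added to shape[risk] expanded across the batch, and the gate logits are softened by the temperature self.temp. A minimal sketch of that head computation for one risk (k, lastdim, batch size and temp are illustrative values, not from the diff):

import torch
from torch import nn

k, lastdim, batch, temp = 3, 4, 5, 1000.
act = nn.SELU()                            # Weibull activation per the deleted comments
shape = nn.Parameter(-torch.ones(k))       # global shape, one entry per mixture component
shapeg = nn.Linear(lastdim, k, bias=True)  # input-dependent correction head
gate = nn.Linear(lastdim, k, bias=False)   # mixture-weight head

xrep = torch.randn(batch, lastdim)         # stand-in for the embedded representation
shape_out = act(shapeg(xrep)) + shape.expand(batch, -1)  # (batch, k)
logits = gate(xrep) / temp                 # tempered mixture logits, (batch, k)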
@@ -492,40 +427,6 @@ def __init__(self, inputdim, k, typ='ConvNet',

self._init_dsm_layers(hidden)

- # if self.dist in ['Weibull']:
- #   self.act = nn.SELU()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['Normal']:
- #   self.act = nn.Identity()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['LogNormal']:
- #   self.act = nn.Tanh()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # else:
- #   raise NotImplementedError('Distribution: '+self.dist+' not implemented'+
- #                             ' yet.')

- # self.gate = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=False)
- #   ) for r in range(self.risks)})

- # self.scaleg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=True)
- #   ) for r in range(self.risks)})

- # self.shapeg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=True)
- #   ) for r in range(self.risks)})

self.embedding = create_conv_representation(inputdim=inputdim,
                                            hidden=hidden,
                                            typ='ConvNet')
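create_conv_representation is defined elsewhere in dsm_torch.py; its body is not shown in this diff. A hypothetical sketch of such a builder, assuming inputdim is the (height, width) of a single-channel image and that the flattened convolutional size is inferred with a dummy forward pass:

import torch
from torch import nn

def conv_representation_sketch(inputdim, hidden):
  # Hypothetical stand-in for create_conv_representation; not the committed code.
  conv = nn.Sequential(
      nn.Conv2d(1, 6, kernel_size=5), nn.MaxPool2d(2), nn.ReLU6(),
      nn.Conv2d(6, 16, kernel_size=5), nn.MaxPool2d(2), nn.ReLU6(),
      nn.Flatten())
  with torch.no_grad():
    flat = conv(torch.ones(1, 1, *inputdim)).shape[-1]  # infer flattened size
  return nn.Sequential(conv, nn.Linear(flat, hidden), nn.ReLU6())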
@@ -609,40 +510,6 @@ def __init__(self, inputdim, k, typ='LSTM', layers=1,

self._init_dsm_layers(hidden)

- # if self.dist in ['Weibull']:
- #   self.act = nn.SELU()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(-torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['Normal']:
- #   self.act = nn.Identity()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # elif self.dist in ['LogNormal']:
- #   self.act = nn.Tanh()
- #   self.shape = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- #   self.scale = nn.ParameterDict({str(r+1): nn.Parameter(torch.ones(k))
- #                                  for r in range(self.risks)})
- # else:
- #   raise NotImplementedError('Distribution: '+self.dist+' not implemented'+
- #                             ' yet.')

- # self.gate = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=False)
- #   ) for r in range(self.risks)})

- # self.scaleg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=True)
- #   ) for r in range(self.risks)})

- # self.shapeg = nn.ModuleDict({str(r+1): nn.Sequential(
- #   nn.Linear(hidden, k, bias=True)
- #   ) for r in range(self.risks)})

if self.typ == 'LSTM':
  self.embedding = nn.LSTM(inputdim, hidden, layers,
                           bias=False, batch_first=True)
