hybrid2.cc
// Copyright 2020 the deepx authors.
// Author: Yafei Zhang ([email protected])
//
#include <deepx_core/graph/optimizer_impl.h>
namespace deepx_core {
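// Hybrid2Optimizer combines two update rules: dense tensor (TSR) parameters
// are updated with AdaGrad, while sparse row-matrix (SRM) parameters are
// updated with GFTRL (group FTRL), an FTRL variant with group-level
// regularization that can zero out entire rows.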
class Hybrid2Optimizer : public OptimizerImpl {
 private:
  ll_optimizer_t::AdaGradConfig ada_grad_config_;
  ll_optimizer_t::GFTRLConfig gftrl_config_;

 public:
  DEFINE_OPTIMIZER_LIKE(Hybrid2Optimizer);

 protected:
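  // Parses one key/value config entry. Every hyperparameter must be strictly
  // positive; an unknown key fails initialization.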
  bool InitConfigKV(const std::string& k, const std::string& v) override {
    if (k == "ada_grad_alpha") {
      ada_grad_config_.alpha = (float_t)std::stod(v);
      if (ada_grad_config_.alpha <= 0) {
        DXERROR("Invalid %s: %s.", k.c_str(), v.c_str());
        return false;
      }
    } else if (k == "ada_grad_beta") {
      ada_grad_config_.beta = (float_t)std::stod(v);
      if (ada_grad_config_.beta <= 0) {
        DXERROR("Invalid %s: %s.", k.c_str(), v.c_str());
        return false;
      }
    } else if (k == "gftrl_alpha") {
      gftrl_config_.alpha = (float_t)std::stod(v);
      if (gftrl_config_.alpha <= 0) {
        DXERROR("Invalid %s: %s.", k.c_str(), v.c_str());
        return false;
      }
    } else if (k == "gftrl_beta") {
      gftrl_config_.beta = (float_t)std::stod(v);
      if (gftrl_config_.beta <= 0) {
        DXERROR("Invalid %s: %s.", k.c_str(), v.c_str());
        return false;
      }
    } else if (k == "gftrl_lambda") {
      gftrl_config_.lambda = (float_t)std::stod(v);
      if (gftrl_config_.lambda <= 0) {
        DXERROR("Invalid %s: %s.", k.c_str(), v.c_str());
        return false;
      }
    } else {
      DXERROR("Unexpected config: %s=%s.", k.c_str(), v.c_str());
      return false;
    }
    return true;
  }
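  // Finalizes both configs once all key/value entries have been parsed
  // (ll_optimizer_t::Init is assumed to derive any cached fields, such as
  // the reciprocal alpha restored in ReadConfigLegacy below).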
  bool PostInitConfig() override {
    ll_optimizer_t::Init(&ada_grad_config_);
    ll_optimizer_t::Init(&gftrl_config_);
    return true;
  }
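  // Legacy config serialization: a version tag followed by the five
  // hyperparameters.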
  void WriteConfigLegacy(OutputStream& os) const override {
    int version = 0;
    os << version;
    os << ada_grad_config_.alpha << ada_grad_config_.beta;
    os << gftrl_config_.alpha << gftrl_config_.beta << gftrl_config_.lambda;
  }
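  // Reads the legacy format back; configs written by a newer version are
  // rejected, and the cached reciprocal of gftrl_alpha is restored by hand.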
  void ReadConfigLegacy(InputStream& is) override {
    int version;
    is >> version;
    if (!is) {
      DXERROR("Failed to read config.");
      return;
    }
    if (version > 0) {
      DXERROR("Couldn't handle a higher version: %d.", version);
      is.set_bad();
      return;
    }
    is >> ada_grad_config_.alpha >> ada_grad_config_.beta;
    is >> gftrl_config_.alpha >> gftrl_config_.beta >> gftrl_config_.lambda;
    gftrl_config_.inv_alpha = 1 / gftrl_config_.alpha;
  }
  void CopyConfigLegacy(const Optimizer& other) override {
    ada_grad_config_ = ((const Hybrid2Optimizer&)other).ada_grad_config_;
    gftrl_config_ = ((const Hybrid2Optimizer&)other).gftrl_config_;
  }
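  // AdaGrad keeps one zero-initialized state tensor per dense parameter
  // (its squared-gradient accumulator), shaped like the parameter itself.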
  void InitParamTSR(const std::string& /*name*/, const tsr_t& W,
                    OptimizerTSRSlot* slot) const override {
    slot->O.resize(1);
    slot->O[0].resize(W.shape());
    slot->O[0].zeros();
  }
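  // GFTRL keeps two sparse state matrices per sparse parameter, both with
  // the parameter's column width and zero-initialized rows.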
  void InitParamSRM(const std::string& /*name*/, const srm_t& W,
                    OptimizerSRMSlot* slot) const override {
    slot->O.resize(2);
    slot->O[0].clear();
    slot->O[0].set_col(W.col());
    slot->O[0].set_initializer(TENSOR_INITIALIZER_TYPE_ZEROS);
    slot->O[1].clear();
    slot->O[1].set_col(W.col());
    slot->O[1].set_initializer(TENSOR_INITIALIZER_TYPE_ZEROS);
  }
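  // Per-batch hooks are forwarded to both underlying configs.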
  void PreUpdate() override {
    ll_optimizer_t::PreBatch(&ada_grad_config_);
    ll_optimizer_t::PreBatch(&gftrl_config_);
  }
  void PostUpdate() override {
    ll_optimizer_t::PostBatch(&ada_grad_config_);
    ll_optimizer_t::PostBatch(&gftrl_config_);
  }
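  // Gradient routing: updates into dense (TSR) parameters use AdaGrad;
  // updates into sparse (SRM) parameters use GFTRL.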
  void UpdateTSR2TSR(const std::string& /*name*/, const tsr_t& G, tsr_t* W,
                     OptimizerTSRSlot* slot) const override {
    ll_optimizer_t::UpdateTSR2TSR(ada_grad_config_, G, W, &slot->O[0]);
  }
  void UpdateSRM2TSR(const std::string& /*name*/, const srm_t& G, tsr_t* W,
                     OptimizerTSRSlot* slot) const override {
    ll_optimizer_t::UpdateSRM2TSR(ada_grad_config_, G, W, &slot->O[0]);
  }
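  // When locking is enabled, the locks guard the parameter and both state
  // matrices during concurrent sparse updates.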
  void UpdateSRM2SRM(const std::string& /*name*/, const srm_t& G, srm_t* W,
                     OptimizerSRMSlot* slot) const override {
    if (use_lock_) {
      ll_optimizer_t::UpdateSRM2SRM(gftrl_config_, G, W, &slot->O[0],
                                    &slot->O[1], slot->Wlock.get(),
                                    slot->Olock[0].get(), slot->Olock[1].get());
    } else {
      ll_optimizer_t::UpdateSRM2SRM(gftrl_config_, G, W, &slot->O[0],
                                    &slot->O[1]);
    }
  }
};
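// Registered under both the full class name and the short alias "hybrid2";
// either string selects this optimizer.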
OPTIMIZER_REGISTER(Hybrid2Optimizer, "Hybrid2Optimizer");
OPTIMIZER_REGISTER(Hybrid2Optimizer, "hybrid2");
} // namespace deepx_core