nn.cc
#include "nn/nn.h"
#include <fstream>

namespace nn {

// Builds a linear + log-softmax prediction layer on top of `input`,
// using the weight and bias stored in the first two children of `var_tree`.
pred_nn_t make_pred_nn(std::shared_ptr<tensor_tree::vertex> var_tree,
    std::shared_ptr<autodiff::op_t> input)
{
    pred_nn_t result;

    result.logprob = autodiff::logsoftmax(
        autodiff::add(autodiff::mul(input, tensor_tree::get_var(var_tree->children[0])),
            tensor_tree::get_var(var_tree->children[1])));

    return result;
}

// Allocates the tensor tree expected by make_pred_nn:
// a "nil" root whose two children are the softmax weight and bias.
std::shared_ptr<tensor_tree::vertex> make_pred_tensor_tree()
{
    tensor_tree::vertex root { "nil" };

    root.children.push_back(tensor_tree::make_tensor("softmax weight"));
    root.children.push_back(tensor_tree::make_tensor("softmax bias"));

    return std::make_shared<tensor_tree::vertex>(root);
}
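For reference, the graph assembled by make_pred_nn is a single affine map followed by a log-softmax, with $W$ the "softmax weight" child and $b$ the "softmax bias" child of the tree above:

$$\mathrm{logprob} = \log \operatorname{softmax}(x W + b)$$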
log_loss::log_loss(la::cpu::tensor_like<double> const& gold,
    la::cpu::tensor_like<double> const& pred)
    : gold(gold), pred(pred)
{}

// Cross entropy against the gold tensor; pred is assumed
// to hold log-probabilities.
double log_loss::loss()
{
    return -la::cpu::dot(gold, pred);
}

// Gradient with respect to pred: -scale * gold.
la::cpu::tensor<double> log_loss::grad(double scale)
{
    la::cpu::tensor<double> result;
    la::cpu::resize_as(result, gold);
    la::cpu::axpy(result, -scale, gold);
    return result;
}
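log_loss is the cross entropy between a (typically one-hot) gold tensor $g$ and a prediction $\hat{y}$ holding log-probabilities, such as the logprob output of make_pred_nn:

$$L(g, \hat{y}) = -\langle g, \hat{y} \rangle, \qquad \frac{\partial L}{\partial \hat{y}} = -g,$$

which is what loss() and grad() compute, with grad() scaling the gradient by scale.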
l2_loss::l2_loss(la::cpu::tensor_like<double> const& gold,
    la::cpu::tensor_like<double> const& pred)
    : gold(gold), pred(pred)
{}

// Squared L2 distance: ||gold - pred||^2
double l2_loss::loss()
{
    la::cpu::tensor<double> diff;
    diff.resize(gold.sizes());
    la::cpu::copy(diff, gold);
    la::cpu::axpy(diff, -1, pred);    // diff = gold - pred
    return la::cpu::dot(diff, diff);
}

// Gradient with respect to pred: 2 * scale * (pred - gold).
la::cpu::tensor<double> l2_loss::grad(double scale)
{
    la::cpu::tensor<double> g;
    g.resize(pred.sizes());
    la::cpu::axpy(g, 2 * scale, pred);
    la::cpu::axpy(g, -2 * scale, gold);
    return g;
}
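l2_loss is the squared Euclidean distance between gold and pred:

$$L(g, \hat{y}) = \lVert g - \hat{y} \rVert^2, \qquad \frac{\partial L}{\partial \hat{y}} = 2 (\hat{y} - g),$$

matching the pair of axpy calls in grad(), again scaled by scale.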
// Applies the same linear + log-softmax layer to every frame in `feat`.
seq_pred_nn_t make_seq_pred_nn(
    std::shared_ptr<tensor_tree::vertex> var_tree,
    std::vector<std::shared_ptr<autodiff::op_t>> const& feat)
{
    seq_pred_nn_t result;

    for (int i = 0; i < feat.size(); ++i) {
        result.logprob.push_back(autodiff::logsoftmax(autodiff::add(
            autodiff::mul(feat[i], tensor_tree::get_var(var_tree->children[0])),
            tensor_tree::get_var(var_tree->children[1]))));
    }

    return result;
}

}
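Below is a minimal usage sketch for log_loss. It assumes la::cpu::tensor can be shaped with resize({...}), that resize zero-fills, and that raw storage is reachable through data(); none of these appear in this file, so treat them as assumptions about the surrounding la library rather than confirmed API.

#include "nn/nn.h"
#include <iostream>

int main()
{
    // Hypothetical 3-class example; resize({...}) and data() are assumed,
    // and resize is assumed to zero-fill.
    la::cpu::tensor<double> gold;
    gold.resize({3});
    gold.data()[1] = 1.0;                        // one-hot target for class 1

    la::cpu::tensor<double> pred;
    pred.resize({3});
    pred.data()[0] = -1.2;                       // log-probabilities, e.g. from
    pred.data()[1] = -0.5;                       // evaluating the logprob op of
    pred.data()[2] = -2.3;                       // make_pred_nn

    nn::log_loss loss { gold, pred };
    std::cout << loss.loss() << std::endl;       // -<gold, pred> = 0.5

    la::cpu::tensor<double> g = loss.grad(1.0);  // -1.0 * gold
    std::cout << g.data()[1] << std::endl;       // -1

    return 0;
}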