-
Notifications
You must be signed in to change notification settings - Fork 1.5k
/
backup.go
302 lines (262 loc) · 8.22 KB
/
backup.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
// +build !oss
/*
* Copyright 2018 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Dgraph Community License (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* https://github.com/dgraph-io/dgraph/blob/master/licenses/DCL.txt
*/
package backup
import (
"bytes"
"compress/gzip"
"context"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/url"
"sync"
"github.com/dgraph-io/badger"
bpb "github.com/dgraph-io/badger/pb"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/dgraph-io/dgraph/posting"
"github.com/dgraph-io/dgraph/protos/pb"
"github.com/dgraph-io/dgraph/x"
)
// Processor handles the different stages of the backup process.
type Processor struct {
	// DB is the Badger pstore managed by this node.
	DB *badger.DB
	// Request stores the backup request containing the parameters for this backup
	// (destination, read timestamp, predicates, group, etc.).
	Request *pb.BackupRequest
}
// Manifest records backup details, these are values used during restore.
// Since is the timestamp from which the next incremental backup should start (it's set
// to the readTs of the current backup).
// Groups are the IDs of the groups involved.
type Manifest struct {
	sync.Mutex
	// Type is the type of backup, either full or incremental.
	Type string `json:"type"`
	// Since is the timestamp at which this backup was taken. It's called Since
	// because it will become the timestamp from which to backup in the next
	// incremental backup.
	Since uint64 `json:"since"`
	// Groups is the map of valid groups to predicates at the time the backup was created.
	Groups map[uint32][]string `json:"groups"`
	// BackupId is a unique ID assigned to all the backups in the same series
	// (from the first full backup to the last incremental backup).
	BackupId string `json:"backup_id"`
	// BackupNum is a monotonically increasing number assigned to each backup in
	// a series. The full backup has BackupNum equal to one and each incremental
	// backup gets assigned the next available number. Used to verify the integrity
	// of the data during a restore.
	BackupNum uint64 `json:"backup_num"`
	// Path is the path to the manifest file. This field is only used during
	// processing and is not written to disk (hence the `json:"-"` tag).
	Path string `json:"-"`
}
// WriteBackup uses the request values to create a stream writer then hand off the data
// retrieval to stream.Orchestrate. The writer will create all the fd's needed to
// collect the data and later move to the target.
// Returns errors on failure, nil on success.
func (pr *Processor) WriteBackup(ctx context.Context) (*pb.Status, error) {
	var emptyRes pb.Status

	if err := ctx.Err(); err != nil {
		return nil, err
	}

	uri, err := url.Parse(pr.Request.Destination)
	if err != nil {
		return &emptyRes, err
	}

	handler, err := NewUriHandler(uri)
	if err != nil {
		return &emptyRes, err
	}

	if err := handler.CreateBackupFile(uri, pr.Request); err != nil {
		return &emptyRes, err
	}

	glog.V(3).Infof("Backup manifest version: %d", pr.Request.SinceTs)

	// Only keys belonging to the requested predicates are backed up.
	predMap := make(map[string]struct{}, len(pr.Request.Predicates))
	for _, pred := range pr.Request.Predicates {
		predMap[pred] = struct{}{}
	}

	var maxVersion uint64
	gzWriter := gzip.NewWriter(handler)
	stream := pr.DB.NewStreamAt(pr.Request.ReadTs)
	stream.LogPrefix = "Dgraph.Backup"
	stream.KeyToList = toBackupList(pr.Request.SinceTs)
	stream.ChooseKey = func(item *badger.Item) bool {
		parsedKey := x.Parse(item.Key())
		// x.Parse can return nil for malformed keys (toBackupKey guards the
		// same way); skip them instead of panicking on the nil dereference.
		if parsedKey == nil {
			return false
		}
		_, ok := predMap[parsedKey.Attr]
		return ok
	}
	stream.Send = func(list *bpb.KVList) error {
		// Track the highest version seen so we can sanity-check it against
		// the stream's readTs below.
		for _, kv := range list.Kv {
			if maxVersion < kv.Version {
				maxVersion = kv.Version
			}
		}
		return writeKVList(list, gzWriter)
	}

	// Pass the caller's ctx (not context.Background()) so the backup honors
	// cancellation and deadlines; previously cancellation was silently ignored.
	if err := stream.Orchestrate(ctx); err != nil {
		glog.Errorf("While taking backup: %v", err)
		return &emptyRes, err
	}

	// A version above readTs indicates a snapshot-isolation violation; log it
	// loudly but keep the backup (matches the original best-effort behavior).
	if maxVersion > pr.Request.ReadTs {
		glog.Errorf("Max timestamp seen during backup (%d) is greater than readTs (%d)",
			maxVersion, pr.Request.ReadTs)
	}

	glog.V(2).Infof("Backup group %d version: %d", pr.Request.GroupId, pr.Request.ReadTs)

	// Close the gzip writer first to flush compressed data into the handler,
	// then close the handler to finalize the destination file.
	if err = gzWriter.Close(); err != nil {
		glog.Errorf("While closing gzipped writer: %v", err)
		return &emptyRes, err
	}
	if err = handler.Close(); err != nil {
		glog.Errorf("While closing handler: %v", err)
		return &emptyRes, err
	}
	glog.Infof("Backup complete: group %d at %d", pr.Request.GroupId, pr.Request.ReadTs)
	return &emptyRes, nil
}
// CompleteBackup will finalize a backup by writing the manifest at the backup destination.
func (pr *Processor) CompleteBackup(ctx context.Context, manifest *Manifest) error {
	// Respect a cancelled context before doing any work.
	if ctx.Err() != nil {
		return ctx.Err()
	}

	// Resolve the destination and build a handler for it.
	uri, err := url.Parse(pr.Request.Destination)
	if err != nil {
		return err
	}
	handler, err := NewUriHandler(uri)
	if err != nil {
		return err
	}

	// Create the manifest file, stream the JSON-encoded manifest into it,
	// and finalize the handler.
	if err := handler.CreateManifest(uri, pr.Request); err != nil {
		return err
	}
	if err := json.NewEncoder(handler).Encode(manifest); err != nil {
		return err
	}
	if err := handler.Close(); err != nil {
		return err
	}

	glog.Infof("Backup completed OK.")
	return nil
}
// GoString implements the GoStringer interface for Manifest. Only the Since
// timestamp and the group map are rendered; the remaining fields are omitted.
func (m *Manifest) GoString() string {
	return fmt.Sprintf("Manifest{Since: %d, Groups: %v}", m.Since, m.Groups)
}
// toBackupList returns a stream.KeyToList function that collects all the
// backup-worthy versions of a single key into a KVList. Versions with a
// timestamp strictly below since are skipped (incremental backups), posting
// lists are rolled up into complete lists, and keys/values are converted to
// their backup wire formats before being appended.
func toBackupList(since uint64) func([]byte, *badger.Iterator) (*bpb.KVList, error) {
	return func(key []byte, itr *badger.Iterator) (*bpb.KVList, error) {
		list := &bpb.KVList{}
	loop:
		for itr.Valid() {
			item := itr.Item()
			if !bytes.Equal(item.Key(), key) {
				// Iterator moved past the requested key; nothing more to collect.
				break
			}
			if item.Version() < since {
				// Ignore versions less than given timestamp, or skip older versions of
				// the given key.
				break
			}
			switch item.UserMeta() {
			case posting.BitEmptyPosting, posting.BitCompletePosting, posting.BitDeltaPosting:
				// A posting list. ReadPostingList consumes (and advances the
				// iterator past) every part of the list; Rollup flattens the
				// deltas into complete KV entries.
				l, err := posting.ReadPostingList(key, itr)
				if err != nil {
					return nil, errors.Wrapf(err, "while reading posting list")
				}
				kvs, err := l.Rollup()
				if err != nil {
					return nil, errors.Wrapf(err, "while rolling up list")
				}
				// Rewrite each rolled-up entry's key and value into backup format.
				for _, kv := range kvs {
					backupKey, err := toBackupKey(kv.Key)
					if err != nil {
						return nil, err
					}
					kv.Key = backupKey
					backupPl, err := toBackupPostingList(kv.Value)
					if err != nil {
						return nil, err
					}
					kv.Value = backupPl
				}
				list.Kv = append(list.Kv, kvs...)
			case posting.BitSchemaPosting:
				var valCopy []byte
				if !item.IsDeletedOrExpired() {
					// No need to copy value if item is deleted or expired.
					var err error
					valCopy, err = item.ValueCopy(nil)
					if err != nil {
						return nil, errors.Wrapf(err, "while copying value")
					}
				}
				backupKey, err := toBackupKey(key)
				if err != nil {
					return nil, err
				}
				kv := &bpb.KV{
					Key:       backupKey,
					Value:     valCopy,
					UserMeta:  []byte{item.UserMeta()},
					Version:   item.Version(),
					ExpiresAt: item.ExpiresAt(),
				}
				list.Kv = append(list.Kv, kv)
				if item.DiscardEarlierVersions() || item.IsDeletedOrExpired() {
					// Earlier versions are irrelevant; stop collecting this key.
					break loop
				}
				// Manually advance the iterator. This cannot be done in the for
				// statement because ReadPostingList advances the iterator so this
				// only needs to be done for BitSchemaPosting entries.
				itr.Next()
			default:
				return nil, errors.Errorf(
					"Unexpected meta: %d for key: %s", item.UserMeta(), hex.Dump(key))
			}
		}
		return list, nil
	}
}
// toBackupKey converts a badger key into its backup wire representation:
// the key is parsed, translated via ToBackupKey, and marshalled to bytes.
func toBackupKey(key []byte) ([]byte, error) {
	pk := x.Parse(key)
	if pk == nil {
		return nil, errors.Errorf("could not parse key %s", hex.Dump(key))
	}
	out, err := pk.ToBackupKey().Marshal()
	if err != nil {
		return nil, errors.Wrapf(err, "while converting key for backup")
	}
	return out, nil
}
// toBackupPostingList converts a marshalled posting list value into its
// backup wire representation.
func toBackupPostingList(val []byte) ([]byte, error) {
	var plist pb.PostingList
	if err := plist.Unmarshal(val); err != nil {
		return nil, errors.Wrapf(err, "while reading posting list")
	}
	out, err := posting.ToBackupPostingList(&plist).Marshal()
	if err != nil {
		return nil, errors.Wrapf(err, "while converting posting list for backup")
	}
	return out, nil
}
// writeKVList writes list to w as a length-prefixed record: an 8-byte
// little-endian uint64 holding the marshalled size, followed by the
// marshalled bytes. The restore path relies on this framing to delimit
// consecutive lists in the stream.
func writeKVList(list *bpb.KVList, w io.Writer) error {
	buf, err := list.Marshal()
	if err != nil {
		return err
	}
	// Encode the size header directly rather than via binary.Write, which
	// uses reflection; this runs once per stream batch. Marshalling first
	// also guarantees the prefix matches the bytes actually written.
	var hdr [8]byte
	binary.LittleEndian.PutUint64(hdr[:], uint64(len(buf)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err = w.Write(buf)
	return err
}