-
Notifications
You must be signed in to change notification settings - Fork 0
/
raft.go
273 lines (243 loc) · 7.6 KB
/
raft.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package upgrades
import (
"bytes"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"time"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hashicorp/raft"
"github.com/juju/errors"
"github.com/juju/replicaset"
"github.com/juju/juju/agent"
"github.com/juju/juju/core/raftlease"
"github.com/juju/juju/feature"
raftworker "github.com/juju/juju/worker/raft"
)
// jujuMachineKey is the replica-set member tag key under which the
// member's corresponding juju machine id is stored; makeRaftServers
// reads it to derive raft server IDs.
const jujuMachineKey = "juju-machine-id"
// BootstrapRaft initialises the raft cluster in a controller that is
// being upgraded. It writes an initial cluster configuration (derived
// from the mongo replica set) into fresh raft log and snapshot stores.
func BootstrapRaft(context Context) error {
	agentConfig := context.AgentConfig()
	storageDir := raftDir(agentConfig)

	// A pre-existing storage dir means this step already ran; a
	// successful stat leaves err nil, which is what we return.
	if _, err := os.Stat(storageDir); !os.IsNotExist(err) {
		return err
	}

	// The in-memory transport carries no real traffic - bootstrapping
	// only writes the configuration to the local stores.
	_, transport := raft.NewInmemTransport(raft.ServerAddress("notused"))
	defer transport.Close()

	conf, err := raftworker.NewRaftConfig(raftworker.Config{
		LocalID:   raft.ServerID(agentConfig.Tag().Id()),
		Logger:    logger,
		Transport: transport,
		FSM:       raftworker.BootstrapFSM{},
	})
	if err != nil {
		return errors.Annotate(err, "getting raft config")
	}

	logStore, err := raftworker.NewLogStore(storageDir)
	if err != nil {
		return errors.Annotate(err, "making log store")
	}
	defer logStore.Close()

	snapshotStore, err := raftworker.NewSnapshotStore(storageDir, 2, logger)
	if err != nil {
		return errors.Annotate(err, "making snapshot store")
	}

	// Derive the initial server set from the replica-set members and
	// the controller's API port.
	st := context.State()
	members, err := st.ReplicaSetMembers()
	if err != nil {
		return errors.Annotate(err, "getting replica set members")
	}
	info, err := st.StateServingInfo()
	if err != nil {
		return errors.Annotate(err, "getting state serving info")
	}
	servers, err := makeRaftServers(members, info.APIPort)
	if err != nil {
		return errors.Trace(err)
	}
	return errors.Annotate(
		raft.BootstrapCluster(conf, logStore, logStore, snapshotStore, transport, servers),
		"bootstrapping raft cluster",
	)
}
// raftDir returns the raft storage directory, a "raft" subdirectory
// of the agent's data directory.
func raftDir(agentConfig agent.ConfigSetter) string {
	dataDir := agentConfig.DataDir()
	return filepath.Join(dataDir, "raft")
}
// makeRaftServers maps mongo replica-set members onto a raft server
// configuration: each member's juju machine id becomes the server ID,
// and its host combined with apiPort becomes the server address.
// Members without a vote become raft non-voters.
func makeRaftServers(members []replicaset.Member, apiPort int) (raft.Configuration, error) {
	var servers []raft.Server
	for _, m := range members {
		machineID, found := m.Tags[jujuMachineKey]
		if !found {
			return raft.Configuration{}, errors.NotFoundf("juju machine id for replset member %d", m.Id)
		}
		host, _, err := net.SplitHostPort(m.Address)
		if err != nil {
			return raft.Configuration{}, errors.Annotatef(err, "getting base address for replset member %d", m.Id)
		}
		// A nil Votes pointer means the member defaults to voting.
		suffrage := raft.Voter
		if m.Votes != nil && *m.Votes < 1 {
			suffrage = raft.Nonvoter
		}
		servers = append(servers, raft.Server{
			ID:       raft.ServerID(machineID),
			Address:  raft.ServerAddress(net.JoinHostPort(host, strconv.Itoa(apiPort))),
			Suffrage: suffrage,
		})
	}
	return raft.Configuration{Servers: servers}, nil
}
// MigrateLegacyLeases converts leases in the legacy store into
// corresponding ones in the raft store. It is a no-op when the
// legacy-leases feature flag is set, when there are no legacy leases,
// or when the raft snapshot store already holds a snapshot.
func MigrateLegacyLeases(context Context) error {
	// We know at this point in time that the raft workers aren't
	// running - they're all guarded by the upgrade-steps gate.
	// We need to migrate leases if:
	// * legacy-leases is off,
	// * there are some legacy leases,
	// * and there are no snapshots in the snapshot store (snapshots
	//   would show that the raft-lease store is already in use).
	st := context.State()
	controllerConfig, err := st.ControllerConfig()
	if err != nil {
		return errors.Annotate(err, "getting controller config")
	}
	if controllerConfig.Features().Contains(feature.LegacyLeases) {
		logger.Debugf("legacy-leases flag is set, not migrating leases")
		return nil
	}
	// The zero time serves both as the cutoff for the legacy lease
	// query and as the global time / lease start in the new snapshot.
	var zero time.Time
	legacyLeases, err := st.LegacyLeases(zero)
	if err != nil {
		return errors.Annotate(err, "getting legacy leases")
	}
	if len(legacyLeases) == 0 {
		logger.Debugf("no legacy leases to migrate")
		return nil
	}
	storageDir := raftDir(context.AgentConfig())
	snapshotStore, err := raftworker.NewSnapshotStore(
		storageDir, 2, logger)
	if err != nil {
		return errors.Annotate(err, "opening snapshot store")
	}
	snapshots, err := snapshotStore.List()
	if err != nil {
		return errors.Annotate(err, "listing snapshots")
	}
	if len(snapshots) != 0 {
		logger.Debugf("snapshots found in store - raft leases in use")
		return nil
	}
	// We need the last term and index, latest configuration and
	// configuration index from the log store.
	logStore, err := raftworker.NewLogStore(storageDir)
	if err != nil {
		return errors.Annotate(err, "opening log store")
	}
	defer logStore.Close()
	latest, configEntry, err := collectLogEntries(logStore)
	if err != nil {
		return errors.Trace(err)
	}
	configuration, err := decodeConfiguration(configEntry.Data)
	if err != nil {
		return errors.Annotate(err, "decoding configuration")
	}
	entries := make(map[raftlease.SnapshotKey]raftlease.SnapshotEntry, len(legacyLeases))
	// The notify target mirrors lease holders into the database; its
	// log sink is discarded since only the writes matter here.
	target := st.LeaseNotifyTarget(ioutil.Discard, logger)
	// Populate the snapshot and the leaseholders collection.
	for key, info := range legacyLeases {
		entries[raftlease.SnapshotKey{
			Namespace: key.Namespace,
			ModelUUID: key.ModelUUID,
			Lease:     key.Lease,
		}] = raftlease.SnapshotEntry{
			Holder: info.Holder,
			// Express the absolute expiry as a duration measured from
			// the snapshot's zero global time.
			Start:    zero,
			Duration: info.Expiry.Sub(zero),
		}
		target.Claimed(key, info.Holder)
	}
	newSnapshot := raftlease.Snapshot{
		Version:    raftlease.SnapshotVersion,
		Entries:    entries,
		GlobalTime: zero,
	}
	// Store the snapshot. The in-memory transport exists only to
	// satisfy the snapshot store's signature; no traffic flows.
	_, transport := raft.NewInmemTransport(raft.ServerAddress("notused"))
	defer transport.Close()
	sink, err := snapshotStore.Create(
		raft.SnapshotVersionMax,
		latest.Index,
		latest.Term,
		configuration,
		configEntry.Index,
		transport,
	)
	if err != nil {
		return errors.Annotate(err, "creating snapshot sink")
	}
	// NOTE(review): on the error path below, Cancel runs before the
	// deferred Close - presumably Close on a cancelled sink is a no-op;
	// confirm against the snapshot store implementation.
	defer sink.Close()
	err = newSnapshot.Persist(sink)
	if err != nil {
		sink.Cancel()
		return errors.Annotate(err, "persisting snapshot")
	}
	return nil
}
// collectLogEntries returns two log entries: the latest one, and the
// most recent configuration entry. (These might be the same.) It
// errors if the log is empty or holds no configuration entry.
func collectLogEntries(store raft.LogStore) (*raft.Log, *raft.Log, error) {
	var latest raft.Log
	lastIndex, err := store.LastIndex()
	if err != nil {
		return nil, nil, errors.Annotate(err, "getting last index")
	}
	if lastIndex == 0 {
		return nil, nil, errors.Errorf("no log entries, expected at least one for configuration")
	}
	err = store.GetLog(lastIndex, &latest)
	if err != nil {
		return nil, nil, errors.Annotate(err, "getting last log entry")
	}
	if latest.Type == raft.LogConfiguration {
		return &latest, &latest, nil
	}
	firstIndex, err := store.FirstIndex()
	if err != nil {
		// Bug fix: removed a stray backtick that was embedded in this
		// error message ("getting first index`").
		return nil, nil, errors.Annotate(err, "getting first index")
	}
	// Scan backwards from just below lastIndex down to firstIndex
	// (inclusive - the decrement happens before the lookup) for the
	// most recent configuration entry. Note: the loop is kept in this
	// shape deliberately; a ">=" countdown would underflow uint64 if
	// firstIndex were ever 0.
	current := lastIndex
	for current > firstIndex {
		current--
		var entry raft.Log
		err := store.GetLog(current, &entry)
		if errors.Cause(err) == raft.ErrLogNotFound {
			// Compacted/missing entries are skipped, not fatal.
			continue
		} else if err != nil {
			return nil, nil, errors.Annotatef(err, "getting log index %d", current)
		}
		if entry.Type == raft.LogConfiguration {
			return &latest, &entry, nil
		}
	}
	return nil, nil, errors.Errorf("no configuration entry in log")
}
// decodeConfiguration unmarshals a msgpack-encoded raft configuration,
// as found in the Data of a LogConfiguration log entry.
func decodeConfiguration(data []byte) (raft.Configuration, error) {
	var (
		handle codec.MsgpackHandle
		config raft.Configuration
	)
	decoder := codec.NewDecoder(bytes.NewBuffer(data), &handle)
	err := decoder.Decode(&config)
	return config, errors.Trace(err)
}