diff --git a/projects/raft-otel/Dockerfile b/projects/raft-otel/Dockerfile
new file mode 100644
index 000000000..832285a6d
--- /dev/null
+++ b/projects/raft-otel/Dockerfile
@@ -0,0 +1,16 @@
+FROM golang:1.22.3
+
+WORKDIR /
+
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY *.go ./
+COPY main/*.go ./main/
+COPY raft_proto/*.go ./raft_proto/
+
+
+EXPOSE 7600
+
+# Bit lazy not to build properly but that's not the main point of this exercise
+CMD ["go", "run", "main/main.go"]
\ No newline at end of file
diff --git a/projects/raft-otel/Dockerfile.client b/projects/raft-otel/Dockerfile.client
new file mode 100644
index 000000000..c66074ca0
--- /dev/null
+++ b/projects/raft-otel/Dockerfile.client
@@ -0,0 +1,16 @@
+FROM golang:1.22.3
+
+WORKDIR /
+
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY *.go ./
+COPY client/*.go ./client/
+COPY raft_proto/*.go ./raft_proto/
+
+
+EXPOSE 7600
+
+# Bit lazy not to build properly but that's not the main point of this exercise
+CMD ["go", "run", "client/client.go"]
\ No newline at end of file
diff --git a/projects/raft-otel/client/client.go b/projects/raft-otel/client/client.go
new file mode 100644
index 000000000..08a5d57be
--- /dev/null
+++ b/projects/raft-otel/client/client.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"raft/raft_proto"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+const port = 7600
+
+func main() {
+	addr := flag.String("dns", "raft", "dns address for raft cluster")
+	flag.Parse()
+
+	if *addr == "" {
+		fmt.Printf("Must supply dns address of cluster\n")
+		os.Exit(1)
+	}
+
+	time.Sleep(time.Second * 5) // wait for raft servers to come up
+
+	ips, err := net.LookupIP(*addr)
+	if err != nil {
+		fmt.Printf("Could not get IPs: %v\n", err)
+		os.Exit(1)
+	}
+
+	clients := make([]raft_proto.RaftKVServiceClient, 0)
+
+	for _, ip := range ips {
+		fmt.Printf("Connecting to %s\n", ip.String())
+		conn, err := grpc.Dial(fmt.Sprintf("%s:%d", ip.String(), port), grpc.WithTransportCredentials(insecure.NewCredentials()))
+		if err != nil {
+			log.Fatalf("%v", err)
+		}
+		client := raft_proto.NewRaftKVServiceClient(conn)
+		clients = append(clients, client)
+	}
+
+	for {
+		for _, c := range clients {
+			n := time.Now().Second()
+			res, err := c.Set(context.TODO(), &raft_proto.SetRequest{Keyname: "cursec", Value: fmt.Sprintf("%d", n)})
+			fmt.Printf("Called set cursec %d, got %v, %v\n", n, res, err)
+
+			time.Sleep(1 * time.Second) // allow consensus to happen
+
+			getres, err := c.Get(context.TODO(), &raft_proto.GetRequest{Keyname: "cursec"})
+			fmt.Printf("Called get cursec, got %v, %v\n", getres, err)
+		}
+		time.Sleep(5 * time.Second)
+	}
+}
diff --git a/projects/raft-otel/docker-compose.yaml b/projects/raft-otel/docker-compose.yaml
new file mode 100644
index 000000000..088492b13
--- /dev/null
+++ b/projects/raft-otel/docker-compose.yaml
@@ -0,0 +1,13 @@
+services:
+  raft:
+    build: .
+    deploy:
+      mode: replicated
+      replicas: 3
+  client:
+    build:
+      context: .
+      dockerfile: ./Dockerfile.client
+    deploy:
+      mode: replicated
+      replicas: 1
\ No newline at end of file
diff --git a/projects/raft-otel/go.mod b/projects/raft-otel/go.mod
new file mode 100644
index 000000000..989f930f6
--- /dev/null
+++ b/projects/raft-otel/go.mod
@@ -0,0 +1,16 @@
+module raft
+
+go 1.19
+
+require (
+	github.com/fortytw2/leaktest v1.3.0
+	google.golang.org/grpc v1.64.0
+	google.golang.org/protobuf v1.34.1
+)
+
+require (
+	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+)
diff --git a/projects/raft-otel/go.sum b/projects/raft-otel/go.sum
new file mode 100644
index 000000000..038c3785e
--- /dev/null
+++ b/projects/raft-otel/go.sum
@@ -0,0 +1,15 @@
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
"strings" + "syscall" + "time" +) + +const port = 7600 + +func main() { + addr := flag.String("dns", "raft", "dns address for raft cluster") + if_addr := flag.String("if", "eth0", "use IPV4 address of this interface") // eth0 works on docker, may vary for other platforms + + if addr == nil || *addr == "" { + fmt.Printf("Must supply dns address of cluster\n") + os.Exit(1) + } + + id := getOwnAddr(*if_addr) + fmt.Printf("My address/node ID is %s\n", id) + + ready := make(chan interface{}) + storage := raft.NewMapStorage() + commitChan := make(chan raft.CommitEntry) + server := raft.NewServer(id, id, storage, ready, commitChan, port) + server.Serve(raft.NewKV()) + + ips, err := net.LookupIP(*addr) + if err != nil { + fmt.Printf("Could not get IPs: %v\n", err) + os.Exit(1) + } + + // Connect to all peers with appropriate waits + // TODO: we only do this once, on startup - we really should periodically check to see if the DNS listing for peers has changed + for _, ip := range ips { + // if not own IP + if !ownAddr(ip, id) { + peerAddr := fmt.Sprintf("%s:%d", ip.String(), port) + + connected := false + for rt := 0; rt <= 3 && !connected; rt++ { + fmt.Printf("Connecting to peer %s\n", peerAddr) + err = server.ConnectToPeer(peerAddr, peerAddr) + if err == nil { + connected = true + } else { // probably just not started up yet, retry + fmt.Printf("Error connecting to peer: %+v", err) + time.Sleep(time.Duration(rt+1) * time.Second) + } + } + if err != nil { + fmt.Printf("Exhausted retries connecting to peer %s", peerAddr) + os.Exit(1) + } + } + } + + close(ready) // start raft server, peers are connected + + gracefulShutdown := make(chan os.Signal, 1) + signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM) + <-gracefulShutdown + server.DisconnectAll() + server.Shutdown() +} + +func getOwnAddr(intf string) string { + ifs, err := net.Interfaces() + if err != nil { + fmt.Printf("Could not get intf: %v\n", err) + os.Exit(1) + } + + for _, cif := range ifs { + if cif.Name == intf { + ads, _ := cif.Addrs() + for _, addr := range ads { + if isIPV4(addr.String()) { + ip := getIP(addr.String()) + return ip.String() + } + + } + } + } + + fmt.Printf("Could not find intf: %s\n", intf) + os.Exit(1) + return "" +} + +func isIPV4(addr string) bool { + parts := strings.Split(addr, "::") + return len(parts) == 1 +} + +func getIP(addr string) net.IP { + parts := strings.Split(addr, "/") + return net.ParseIP(parts[0]) +} + +func ownAddr(ip net.IP, myAddr string) bool { + res := ip.String() == myAddr + return res +} diff --git a/projects/raft-otel/main/main.go b/projects/raft-otel/main/main.go new file mode 100644 index 000000000..45dfe47f8 --- /dev/null +++ b/projects/raft-otel/main/main.go @@ -0,0 +1,113 @@ +package main + +import ( + "flag" + "fmt" + "net" + "os" + "os/signal" + "raft" + "strings" + "syscall" + "time" +) + +const port = 7600 + +func main() { + addr := flag.String("dns", "raft", "dns address for raft cluster") + if_addr := flag.String("if", "eth0", "use IPV4 address of this interface") // eth0 works on docker, may vary for other platforms + + if addr == nil || *addr == "" { + fmt.Printf("Must supply dns address of cluster\n") + os.Exit(1) + } + + id := getOwnAddr(*if_addr) + fmt.Printf("My address/node ID is %s\n", id) + + ready := make(chan interface{}) + storage := raft.NewMapStorage() + commitChan := make(chan raft.CommitEntry) + server := raft.NewServer(id, id, storage, ready, commitChan, port) + server.Serve(raft.NewKV()) + + ips, err := net.LookupIP(*addr) + if err != nil { + 
fmt.Printf("Could not get IPs: %v\n", err) + os.Exit(1) + } + + // Connect to all peers with appropriate waits + // TODO: we only do this once, on startup - we really should periodically check to see if the DNS listing for peers has changed + for _, ip := range ips { + // if not own IP + if !ownAddr(ip, id) { + peerAddr := fmt.Sprintf("%s:%d", ip.String(), port) + + connected := false + for rt := 0; rt <= 3 && !connected; rt++ { + fmt.Printf("Connecting to peer %s\n", peerAddr) + err = server.ConnectToPeer(peerAddr, peerAddr) + if err == nil { + connected = true + } else { // probably just not started up yet, retry + fmt.Printf("Error connecting to peer: %+v", err) + time.Sleep(time.Duration(rt+1) * time.Second) + } + } + if err != nil { + fmt.Printf("Exhausted retries connecting to peer %s", peerAddr) + os.Exit(1) + } + } + } + + close(ready) // start raft server, peers are connected + + gracefulShutdown := make(chan os.Signal, 1) + signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM) + <-gracefulShutdown + server.DisconnectAll() + server.Shutdown() +} + +func getOwnAddr(intf string) string { + ifs, err := net.Interfaces() + if err != nil { + fmt.Printf("Could not get intf: %v\n", err) + os.Exit(1) + } + + for _, cif := range ifs { + if cif.Name == intf { + ads, _ := cif.Addrs() + for _, addr := range ads { + if isIPV4(addr.String()) { + ip := getIP(addr.String()) + return ip.String() + } + + } + } + } + + fmt.Printf("Could not find intf: %s\n", intf) + os.Exit(1) + return "" +} + +func isIPV4(addr string) bool { + parts := strings.Split(addr, "::") + return len(parts) == 1 +} + +func getIP(addr string) net.IP { + parts := strings.Split(addr, "/") + return net.ParseIP(parts[0]) +} + +func ownAddr(ip net.IP, myAddr string) bool { + res := ip.String() == myAddr + return res +} diff --git a/projects/raft-otel/notes.md b/projects/raft-otel/notes.md new file mode 100644 index 000000000..4a378daf0 --- /dev/null +++ b/projects/raft-otel/notes.md @@ -0,0 +1,23 @@ +# Sample Raft implementation - Incomplete Version + +This is based on Eli Bendersky's [https://eli.thegreenplace.net] RAFT demo code. + +I've modified it in a few ways: + * Adds a main.go so you can run the RAFT code as docker containers (or Kube) - peers are found via DNS lookup + * Changed from integer based peer IDs to use of IP addresses, so that the instances can come up without coordination with each other + * Changed from standard RPC to gRPC (as we've been using throughout this course) + * Adds Dockerfile and docker-compose.yml + * Added structure to Command (to simplify gRPCing) + * Removed one test (TestCrashAfterSubmit) as could not make the timing work to reliably crash leader before it had a chance to commit a change (would be easier to do this if code were restructured to inject time) + * Added endpoint for doing some sets/gets of data, and a simple client that calls this - to demo what's usually done with RAFT, also added a client that exercises it + + ## Building and running this project + + If you change the raft.proto protocol buffer definitions, you must regenerate the bindings by: + + ``` +protoc --proto_path=. --go_out=. --go-grpc_out=. raft.proto + ``` + + To run this under docker-compose, use `docker-compose up --build -d` or your preferred variant. + Docker must be installed and running. 
\ No newline at end of file diff --git a/projects/raft-otel/raft.go b/projects/raft-otel/raft.go new file mode 100644 index 000000000..a1031e86d --- /dev/null +++ b/projects/raft-otel/raft.go @@ -0,0 +1,739 @@ +// Core Raft implementation - Consensus Module. +// +// Eli Bendersky [https://eli.thegreenplace.net] +// This code is in the public domain. +package raft + +import ( + "bytes" + "encoding/gob" + "fmt" + "log" + "math/rand" + "os" + "sync" + "time" +) + +const DebugCM = 1 + +type CommandImpl struct { + Command string + Args []string +} + +// CommitEntry is the data reported by Raft to the commit channel. Each commit +// entry notifies the client that consensus was reached on a command and it can +// be applied to the client's state machine. +type CommitEntry struct { + // Command is the client command being committed. + Command CommandImpl + + // Index is the log index at which the client command is committed. + Index int + + // Term is the Raft term at which the client command is committed. + Term int +} + +type CMState int + +const ( + Follower CMState = iota + Candidate + Leader + Dead +) + +func (s CMState) String() string { + switch s { + case Follower: + return "Follower" + case Candidate: + return "Candidate" + case Leader: + return "Leader" + case Dead: + return "Dead" + default: + panic("unreachable") + } +} + +type LogEntry struct { + Command CommandImpl + Term int +} + +// ConsensusModule (CM) implements a single node of Raft consensus. +type ConsensusModule struct { + // mu protects concurrent access to a CM. + mu sync.Mutex + + // id is the server ID of this CM. + id string + + // peerIds lists the IDs of our peers in the cluster. + peerIds map[string]bool + + // server is the server containing this CM. It's used to issue RPC calls + // to peers. + server *Server + + // storage is used to persist state. + storage Storage + + // commitChan is the channel where this CM is going to report committed log + // entries. It's passed in by the client during construction. + commitChan chan<- CommitEntry + + // newCommitReadyChan is an internal notification channel used by goroutines + // that commit new entries to the log to notify that these entries may be sent + // on commitChan. + newCommitReadyChan chan struct{} + + // triggerAEChan is an internal notification channel used to trigger + // sending new AEs to followers when interesting changes occurred. + triggerAEChan chan struct{} + + // Persistent Raft state on all servers + currentTerm int + votedFor string + log []LogEntry + + // Volatile Raft state on all servers + commitIndex int + lastApplied int + state CMState + electionResetEvent time.Time + + // Volatile Raft state on leaders + nextIndex map[string]int + matchIndex map[string]int +} + +// NewConsensusModule creates a new CM with the given ID, list of peer IDs and +// server. The ready channel signals the CM that all peers are connected and +// it's safe to start its state machine. commitChan is going to be used by the +// CM to send log entries that have been committed by the Raft cluster. 
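+//
+// A minimal usage sketch (illustrative only; `apply` is a placeholder for the
+// caller's own state-machine update function):
+//
+//	cm := NewConsensusModule(id, srv, storage, ready, commitChan)
+//	close(ready) // all peers connected; the election timer may start
+//	for entry := range commitChan {
+//		apply(entry.Command) // apply committed commands in log order
+//	}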
+func NewConsensusModule(id string, server *Server, storage Storage, ready <-chan interface{}, commitChan chan<- CommitEntry) *ConsensusModule { + cm := new(ConsensusModule) + cm.id = id + cm.peerIds = make(map[string]bool) + cm.server = server + cm.storage = storage + cm.commitChan = commitChan + cm.newCommitReadyChan = make(chan struct{}, 16) + cm.triggerAEChan = make(chan struct{}, 1) + cm.state = Follower + cm.votedFor = "" + cm.commitIndex = -1 + cm.lastApplied = -1 + cm.nextIndex = make(map[string]int) + cm.matchIndex = make(map[string]int) + + if cm.storage.HasData() { + cm.restoreFromStorage() + } + + go func() { + // The CM is dormant until ready is signaled; then, it starts a countdown + // for leader election. + <-ready + cm.mu.Lock() + cm.electionResetEvent = time.Now() + cm.mu.Unlock() + cm.runElectionTimer() + }() + + go cm.commitChanSender() + return cm +} + +func (cm *ConsensusModule) AddPeerID(peerId string) { + cm.peerIds[peerId] = true +} + +// Report reports the state of this CM. +func (cm *ConsensusModule) Report() (id string, term int, isLeader bool) { + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.id, cm.currentTerm, cm.state == Leader +} + +// Submit submits a new command to the CM. This function doesn't block; clients +// read the commit channel passed in the constructor to be notified of new +// committed entries. It returns true iff this CM is the leader - in which case +// the command is accepted. If false is returned, the client will have to find +// a different CM to submit this command to. +func (cm *ConsensusModule) Submit(command CommandImpl) bool { + cm.mu.Lock() + cm.dlog("Submit received by %v: %v", cm.state, command) + if cm.state == Leader { + cm.log = append(cm.log, LogEntry{Command: command, Term: cm.currentTerm}) + cm.persistToStorage() + cm.dlog("... log=%v", cm.log) + cm.mu.Unlock() + cm.triggerAEChan <- struct{}{} + return true + } + + cm.mu.Unlock() + return false +} + +// Stop stops this CM, cleaning up its state. This method returns quickly, but +// it may take a bit of time (up to ~election timeout) for all goroutines to +// exit. +func (cm *ConsensusModule) Stop() { + cm.mu.Lock() + defer cm.mu.Unlock() + cm.state = Dead + cm.dlog("becomes Dead") + close(cm.newCommitReadyChan) +} + +// restoreFromStorage restores the persistent state of this CM from storage. +// It should be called during constructor, before any concurrency concerns. +func (cm *ConsensusModule) restoreFromStorage() { + if termData, found := cm.storage.Get("currentTerm"); found { + d := gob.NewDecoder(bytes.NewBuffer(termData)) + if err := d.Decode(&cm.currentTerm); err != nil { + log.Fatal(err) + } + } else { + log.Fatal("currentTerm not found in storage") + } + if votedData, found := cm.storage.Get("votedFor"); found { + d := gob.NewDecoder(bytes.NewBuffer(votedData)) + if err := d.Decode(&cm.votedFor); err != nil { + log.Fatal(err) + } + } else { + log.Fatal("votedFor not found in storage") + } + if logData, found := cm.storage.Get("log"); found { + d := gob.NewDecoder(bytes.NewBuffer(logData)) + if err := d.Decode(&cm.log); err != nil { + log.Fatal(err) + } + } else { + log.Fatal("log not found in storage") + } +} + +// persistToStorage saves all of CM's persistent state in cm.storage. +// Expects cm.mu to be locked. 
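+// Only currentTerm, votedFor and log are written - these are the three fields
+// Figure 2 of the paper marks as persistent; commitIndex and lastApplied are
+// volatile and are rebuilt after a restart.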
func (cm *ConsensusModule) persistToStorage() {
+	var termData bytes.Buffer
+	if err := gob.NewEncoder(&termData).Encode(cm.currentTerm); err != nil {
+		log.Fatal(err)
+	}
+	cm.storage.Set("currentTerm", termData.Bytes())
+
+	var votedData bytes.Buffer
+	if err := gob.NewEncoder(&votedData).Encode(cm.votedFor); err != nil {
+		log.Fatal(err)
+	}
+	cm.storage.Set("votedFor", votedData.Bytes())
+
+	var logData bytes.Buffer
+	if err := gob.NewEncoder(&logData).Encode(cm.log); err != nil {
+		log.Fatal(err)
+	}
+	cm.storage.Set("log", logData.Bytes())
+}
+
+// dlog logs a debugging message if DebugCM > 0.
+func (cm *ConsensusModule) dlog(format string, args ...interface{}) {
+	if DebugCM > 0 {
+		format = fmt.Sprintf("[%s] ", cm.id) + format
+		log.Printf(format, args...)
+	}
+}
+
+// See figure 2 in the paper.
+type RequestVoteArgs struct {
+	Term         int
+	CandidateId  string
+	LastLogIndex int
+	LastLogTerm  int
+}
+
+type RequestVoteReply struct {
+	Term        int
+	VoteGranted bool
+}
+
+// RequestVote RPC.
+func (cm *ConsensusModule) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) error {
+	cm.mu.Lock()
+	defer cm.mu.Unlock()
+	if cm.state == Dead {
+		return nil
+	}
+	lastLogIndex, lastLogTerm := cm.lastLogIndexAndTerm()
+	cm.dlog("RequestVote: %+v [currentTerm=%d, votedFor=%s, log index/term=(%d, %d)]", args, cm.currentTerm, cm.votedFor, lastLogIndex, lastLogTerm)
+
+	if args.Term > cm.currentTerm {
+		cm.dlog("... term out of date in RequestVote")
+		cm.becomeFollower(args.Term)
+	}
+
+	if cm.currentTerm == args.Term &&
+		(cm.votedFor == "" || cm.votedFor == args.CandidateId) &&
+		(args.LastLogTerm > lastLogTerm ||
+			(args.LastLogTerm == lastLogTerm && args.LastLogIndex >= lastLogIndex)) {
+		reply.VoteGranted = true
+		cm.votedFor = args.CandidateId
+		cm.electionResetEvent = time.Now()
+	} else {
+		reply.VoteGranted = false
+	}
+	reply.Term = cm.currentTerm
+	cm.persistToStorage()
+	cm.dlog("... RequestVote reply: %+v", reply)
+	return nil
+}
+
+// See figure 2 in the paper.
+type AppendEntriesArgs struct {
+	Term     int
+	LeaderId string
+
+	PrevLogIndex int
+	PrevLogTerm  int
+	Entries      []LogEntry
+	LeaderCommit int
+}
+
+type AppendEntriesReply struct {
+	Term    int
+	Success bool
+
+	// Faster conflict resolution optimization (described near the end of section
+	// 5.3 in the paper.)
+	ConflictIndex int
+	ConflictTerm  int
+}
+
+func (cm *ConsensusModule) AppendEntries(args AppendEntriesArgs, reply *AppendEntriesReply) error {
+	cm.mu.Lock()
+	defer cm.mu.Unlock()
+	if cm.state == Dead {
+		return nil
+	}
+
+	if args.Term > cm.currentTerm {
+		cm.dlog("... term out of date in AppendEntries")
+		cm.becomeFollower(args.Term)
+	}
+
+	reply.Success = false
+	if args.Term == cm.currentTerm {
+		if cm.state != Follower {
+			cm.becomeFollower(args.Term)
+		}
+		cm.electionResetEvent = time.Now()
+
+		// Does our log contain an entry at PrevLogIndex whose term matches
+		// PrevLogTerm? Note that in the extreme case of PrevLogIndex=-1 this is
+		// vacuously true.
+		if args.PrevLogIndex == -1 ||
+			(args.PrevLogIndex < len(cm.log) && args.PrevLogTerm == cm.log[args.PrevLogIndex].Term) {
+			reply.Success = true
+
+			// Find an insertion point - where there's a term mismatch between
+			// the existing log starting at PrevLogIndex+1 and the new entries sent
+			// in the RPC.
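+			// Worked example (illustrative): if our log has terms [1,1,2,3],
+			// PrevLogIndex=1 and the leader sends entries with terms [2,4], the
+			// scan below stops at logInsertIndex=3, newEntriesIndex=1, so our
+			// term-3 entry is dropped and replaced by the leader's term-4 entry.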
+ logInsertIndex := args.PrevLogIndex + 1 + newEntriesIndex := 0 + + for { + if logInsertIndex >= len(cm.log) || newEntriesIndex >= len(args.Entries) { + break + } + if cm.log[logInsertIndex].Term != args.Entries[newEntriesIndex].Term { + break + } + logInsertIndex++ + newEntriesIndex++ + } + // At the end of this loop: + // - logInsertIndex points at the end of the log, or an index where the + // term mismatches with an entry from the leader + // - newEntriesIndex points at the end of Entries, or an index where the + // term mismatches with the corresponding log entry + if newEntriesIndex < len(args.Entries) { + cm.dlog("... inserting entries %v from index %d", args.Entries[newEntriesIndex:], logInsertIndex) + cm.log = append(cm.log[:logInsertIndex], args.Entries[newEntriesIndex:]...) + cm.dlog("... log is now: %v", cm.log) + } + + // Set commit index. + if args.LeaderCommit > cm.commitIndex { + cm.commitIndex = intMin(args.LeaderCommit, len(cm.log)-1) + cm.dlog("... setting commitIndex=%d", cm.commitIndex) + cm.newCommitReadyChan <- struct{}{} + } + } else { + // No match for PrevLogIndex/PrevLogTerm. Populate + // ConflictIndex/ConflictTerm to help the leader bring us up to date + // quickly. + if args.PrevLogIndex >= len(cm.log) { + reply.ConflictIndex = len(cm.log) + reply.ConflictTerm = -1 + } else { + // PrevLogIndex points within our log, but PrevLogTerm doesn't match + // cm.log[PrevLogIndex]. + reply.ConflictTerm = cm.log[args.PrevLogIndex].Term + + var i int + for i = args.PrevLogIndex - 1; i >= 0; i-- { + if cm.log[i].Term != reply.ConflictTerm { + break + } + } + reply.ConflictIndex = i + 1 + } + } + } + + reply.Term = cm.currentTerm + cm.persistToStorage() + cm.dlog("AppendEntries reply: %+v", *reply) + return nil +} + +// electionTimeout generates a pseudo-random election timeout duration. +func (cm *ConsensusModule) electionTimeout() time.Duration { + // If RAFT_FORCE_MORE_REELECTION is set, stress-test by deliberately + // generating a hard-coded number very often. This will create collisions + // between different servers and force more re-elections. + if len(os.Getenv("RAFT_FORCE_MORE_REELECTION")) > 0 && rand.Intn(3) == 0 { + return time.Duration(150) * time.Millisecond + } else { + return time.Duration(150+rand.Intn(150)) * time.Millisecond + } +} + +// runElectionTimer implements an election timer. It should be launched whenever +// we want to start a timer towards becoming a candidate in a new election. +// +// This function is blocking and should be launched in a separate goroutine; +// it's designed to work for a single (one-shot) election timer, as it exits +// whenever the CM state changes from follower/candidate or the term changes. +func (cm *ConsensusModule) runElectionTimer() { + timeoutDuration := cm.electionTimeout() + cm.mu.Lock() + termStarted := cm.currentTerm + cm.mu.Unlock() + cm.dlog("election timer started (%v), term=%d", timeoutDuration, termStarted) + + // This loops until either: + // - we discover the election timer is no longer needed, or + // - the election timer expires and this CM becomes a candidate + // In a follower, this typically keeps running in the background for the + // duration of the CM's lifetime. 
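+	// Polling with a short ticker (rather than sleeping for the full timeout)
+	// lets the loop notice state and term changes promptly, not just expiry.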
+ ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + <-ticker.C + + cm.mu.Lock() + if cm.state != Candidate && cm.state != Follower { + cm.dlog("in election timer state=%s, bailing out", cm.state) + cm.mu.Unlock() + return + } + + if termStarted != cm.currentTerm { + cm.dlog("in election timer term changed from %d to %d, bailing out", termStarted, cm.currentTerm) + cm.mu.Unlock() + return + } + + // Start an election if we haven't heard from a leader or haven't voted for + // someone for the duration of the timeout. + if elapsed := time.Since(cm.electionResetEvent); elapsed >= timeoutDuration { + cm.startElection() + cm.mu.Unlock() + return + } + cm.mu.Unlock() + } +} + +// startElection starts a new election with this CM as a candidate. +// Expects cm.mu to be locked. +func (cm *ConsensusModule) startElection() { + cm.state = Candidate + cm.currentTerm += 1 + savedCurrentTerm := cm.currentTerm + cm.electionResetEvent = time.Now() + cm.votedFor = cm.id + cm.dlog("becomes Candidate (currentTerm=%d); log=%v", savedCurrentTerm, cm.log) + + votesReceived := 1 + + // Send RequestVote RPCs to all other servers concurrently. + for peerId := range cm.peerIds { + go func(peerId string) { + cm.mu.Lock() + savedLastLogIndex, savedLastLogTerm := cm.lastLogIndexAndTerm() + cm.mu.Unlock() + + args := RequestVoteArgs{ + Term: savedCurrentTerm, + CandidateId: cm.id, + LastLogIndex: savedLastLogIndex, + LastLogTerm: savedLastLogTerm, + } + + cm.dlog("sending RequestVote to %s: %+v", peerId, args) + var reply RequestVoteReply + if err := cm.server.CallRequestVote(peerId, args, &reply); err == nil { + cm.mu.Lock() + defer cm.mu.Unlock() + cm.dlog("received RequestVoteReply %+v", reply) + + if cm.state != Candidate { + cm.dlog("while waiting for reply, state = %v", cm.state) + return + } + + if reply.Term > savedCurrentTerm { + cm.dlog("term out of date in RequestVoteReply") + cm.becomeFollower(reply.Term) + return + } else if reply.Term == savedCurrentTerm { + if reply.VoteGranted { + votesReceived += 1 + if votesReceived*2 > len(cm.peerIds)+1 { + // Won the election! + cm.dlog("wins election with %d votes", votesReceived) + cm.startLeader() + return + } + } + } + } + }(peerId) + } + + // Run another election timer, in case this election is not successful. + go cm.runElectionTimer() +} + +// becomeFollower makes cm a follower and resets its state. +// Expects cm.mu to be locked. +func (cm *ConsensusModule) becomeFollower(term int) { + cm.dlog("becomes Follower with term=%d; log=%v", term, cm.log) + cm.state = Follower + cm.currentTerm = term + cm.votedFor = "" + cm.electionResetEvent = time.Now() + + go cm.runElectionTimer() +} + +// startLeader switches cm into a leader state and begins process of heartbeats. +// Expects cm.mu to be locked. +func (cm *ConsensusModule) startLeader() { + cm.state = Leader + + for peerId := range cm.peerIds { + cm.nextIndex[peerId] = len(cm.log) + cm.matchIndex[peerId] = -1 + } + cm.dlog("becomes Leader; term=%d, nextIndex=%v, matchIndex=%v; log=%v", cm.currentTerm, cm.nextIndex, cm.matchIndex, cm.log) + + // This goroutine runs in the background and sends AEs to peers: + // * Whenever something is sent on triggerAEChan + // * ... Or every 50 ms, if no events occur on triggerAEChan + go func(heartbeatTimeout time.Duration) { + // Immediately send AEs to peers. 
+ cm.leaderSendAEs() + + t := time.NewTimer(heartbeatTimeout) + defer t.Stop() + for { + doSend := false + select { + case <-t.C: + doSend = true + + // Reset timer to fire again after heartbeatTimeout. + t.Stop() + t.Reset(heartbeatTimeout) + case _, ok := <-cm.triggerAEChan: + if ok { + doSend = true + } else { + return + } + + // Reset timer for heartbeatTimeout. + if !t.Stop() { + <-t.C + } + t.Reset(heartbeatTimeout) + } + + if doSend { + // If this isn't a leader any more, stop the heartbeat loop. + cm.mu.Lock() + if cm.state != Leader { + cm.mu.Unlock() + return + } + cm.mu.Unlock() + cm.leaderSendAEs() + } + } + }(50 * time.Millisecond) +} + +// leaderSendAEs sends a round of AEs to all peers, collects their +// replies and adjusts cm's state. +func (cm *ConsensusModule) leaderSendAEs() { + cm.mu.Lock() + if cm.state != Leader { + cm.mu.Unlock() + return + } + savedCurrentTerm := cm.currentTerm + cm.mu.Unlock() + + for peerId := range cm.peerIds { + go func(peerId string) { + cm.mu.Lock() + ni := cm.nextIndex[peerId] + prevLogIndex := ni - 1 + prevLogTerm := -1 + if prevLogIndex >= 0 { + prevLogTerm = cm.log[prevLogIndex].Term + } + entries := cm.log[ni:] + + args := AppendEntriesArgs{ + Term: savedCurrentTerm, + LeaderId: cm.id, + PrevLogIndex: prevLogIndex, + PrevLogTerm: prevLogTerm, + Entries: entries, + LeaderCommit: cm.commitIndex, + } + cm.mu.Unlock() + cm.dlog("sending AppendEntries to %v: ni=%d, args=%+v", peerId, ni, args) + var reply AppendEntriesReply + if err := cm.server.CallAppendEntries(peerId, args, &reply); err == nil { + cm.mu.Lock() + defer cm.mu.Unlock() + if reply.Term > cm.currentTerm { + cm.dlog("term out of date in heartbeat reply") + cm.becomeFollower(reply.Term) + return + } + + if cm.state == Leader && savedCurrentTerm == reply.Term { + if reply.Success { + cm.nextIndex[peerId] = ni + len(entries) + cm.matchIndex[peerId] = cm.nextIndex[peerId] - 1 + + savedCommitIndex := cm.commitIndex + for i := cm.commitIndex + 1; i < len(cm.log); i++ { + if cm.log[i].Term == cm.currentTerm { + matchCount := 1 + for peerId := range cm.peerIds { + if cm.matchIndex[peerId] >= i { + matchCount++ + } + } + if matchCount*2 > len(cm.peerIds)+1 { + cm.commitIndex = i + } + } + } + cm.dlog("AppendEntries reply from %s success: nextIndex := %v, matchIndex := %v; commitIndex := %d", peerId, cm.nextIndex, cm.matchIndex, cm.commitIndex) + if cm.commitIndex != savedCommitIndex { + cm.dlog("leader sets commitIndex := %d", cm.commitIndex) + // Commit index changed: the leader considers new entries to be + // committed. Send new entries on the commit channel to this + // leader's clients, and notify followers by sending them AEs. + cm.newCommitReadyChan <- struct{}{} + cm.triggerAEChan <- struct{}{} + } + } else { + if reply.ConflictTerm >= 0 { + lastIndexOfTerm := -1 + for i := len(cm.log) - 1; i >= 0; i-- { + if cm.log[i].Term == reply.ConflictTerm { + lastIndexOfTerm = i + break + } + } + if lastIndexOfTerm >= 0 { + cm.nextIndex[peerId] = lastIndexOfTerm + 1 + } else { + cm.nextIndex[peerId] = reply.ConflictIndex + } + } else { + cm.nextIndex[peerId] = reply.ConflictIndex + } + cm.dlog("\nAppendEntries reply from %s !success: nextIndex := %d", peerId, cm.nextIndex[peerId]) + } + } + } + }(peerId) + } +} + +// lastLogIndexAndTerm returns the last log index and the last log entry's term +// (or -1 if there's no log) for this server. +// Expects cm.mu to be locked. 
+func (cm *ConsensusModule) lastLogIndexAndTerm() (int, int) { + if len(cm.log) > 0 { + lastIndex := len(cm.log) - 1 + return lastIndex, cm.log[lastIndex].Term + } else { + return -1, -1 + } +} + +// commitChanSender is responsible for sending committed entries on +// cm.commitChan. It watches newCommitReadyChan for notifications and calculates +// which new entries are ready to be sent. This method should run in a separate +// background goroutine; cm.commitChan may be buffered and will limit how fast +// the client consumes new committed entries. Returns when newCommitReadyChan is +// closed. +func (cm *ConsensusModule) commitChanSender() { + for range cm.newCommitReadyChan { + // Find which entries we have to apply. + cm.mu.Lock() + savedLastApplied := cm.lastApplied + var entries []LogEntry + if cm.commitIndex > cm.lastApplied { + entries = cm.log[cm.lastApplied+1 : cm.commitIndex+1] + cm.lastApplied = cm.commitIndex + } + cm.mu.Unlock() + cm.dlog("commitChanSender entries=%v, savedLastApplied=%d", entries, savedLastApplied) + + for i, entry := range entries { + cm.commitChan <- CommitEntry{ + Command: entry.Command, + Index: savedLastApplied + i + 1, + Term: entry.Term, + } + } + } + cm.dlog("commitChanSender done") +} + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/projects/raft-otel/raft.pb.go b/projects/raft-otel/raft.pb.go new file mode 100644 index 000000000..38eec3b6d --- /dev/null +++ b/projects/raft-otel/raft.pb.go @@ -0,0 +1,871 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.24.4 +// source: raft.proto + +package raft_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyname string `protobuf:"bytes,1,opt,name=keyname,proto3" json:"keyname,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *SetRequest) Reset() { + *x = SetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRequest) ProtoMessage() {} + +func (x *SetRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRequest.ProtoReflect.Descriptor instead. 
+func (*SetRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{0} +} + +func (x *SetRequest) GetKeyname() string { + if x != nil { + return x.Keyname + } + return "" +} + +func (x *SetRequest) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type SetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetResponse) Reset() { + *x = SetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetResponse) ProtoMessage() {} + +func (x *SetResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetResponse.ProtoReflect.Descriptor instead. +func (*SetResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{1} +} + +type GetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyname string `protobuf:"bytes,1,opt,name=keyname,proto3" json:"keyname,omitempty"` +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. +func (*GetRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{2} +} + +func (x *GetRequest) GetKeyname() string { + if x != nil { + return x.Keyname + } + return "" +} + +type GetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. 
+func (*GetResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{3} +} + +func (x *GetResponse) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type RequestVoteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + CandidateId string `protobuf:"bytes,2,opt,name=candidateId,proto3" json:"candidateId,omitempty"` + LastLogIndex int64 `protobuf:"varint,3,opt,name=lastLogIndex,proto3" json:"lastLogIndex,omitempty"` + LastLogTerm int64 `protobuf:"varint,4,opt,name=lastLogTerm,proto3" json:"lastLogTerm,omitempty"` +} + +func (x *RequestVoteRequest) Reset() { + *x = RequestVoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestVoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestVoteRequest) ProtoMessage() {} + +func (x *RequestVoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestVoteRequest.ProtoReflect.Descriptor instead. +func (*RequestVoteRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{4} +} + +func (x *RequestVoteRequest) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *RequestVoteRequest) GetCandidateId() string { + if x != nil { + return x.CandidateId + } + return "" +} + +func (x *RequestVoteRequest) GetLastLogIndex() int64 { + if x != nil { + return x.LastLogIndex + } + return 0 +} + +func (x *RequestVoteRequest) GetLastLogTerm() int64 { + if x != nil { + return x.LastLogTerm + } + return 0 +} + +type RequestVoteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + VoteGranted bool `protobuf:"varint,2,opt,name=voteGranted,proto3" json:"voteGranted,omitempty"` +} + +func (x *RequestVoteResponse) Reset() { + *x = RequestVoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestVoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestVoteResponse) ProtoMessage() {} + +func (x *RequestVoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestVoteResponse.ProtoReflect.Descriptor instead. 
+func (*RequestVoteResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{5} +} + +func (x *RequestVoteResponse) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *RequestVoteResponse) GetVoteGranted() bool { + if x != nil { + return x.VoteGranted + } + return false +} + +type AppendEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + PrevLogIndex int64 `protobuf:"varint,3,opt,name=prevLogIndex,proto3" json:"prevLogIndex,omitempty"` + PrevLogTerm int64 `protobuf:"varint,4,opt,name=prevLogTerm,proto3" json:"prevLogTerm,omitempty"` + Entries []*LogEntry `protobuf:"bytes,5,rep,name=entries,proto3" json:"entries,omitempty"` + LeaderCommit int64 `protobuf:"varint,6,opt,name=leaderCommit,proto3" json:"leaderCommit,omitempty"` +} + +func (x *AppendEntriesRequest) Reset() { + *x = AppendEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendEntriesRequest) ProtoMessage() {} + +func (x *AppendEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendEntriesRequest.ProtoReflect.Descriptor instead. 
+func (*AppendEntriesRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{6} +} + +func (x *AppendEntriesRequest) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *AppendEntriesRequest) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +func (x *AppendEntriesRequest) GetPrevLogIndex() int64 { + if x != nil { + return x.PrevLogIndex + } + return 0 +} + +func (x *AppendEntriesRequest) GetPrevLogTerm() int64 { + if x != nil { + return x.PrevLogTerm + } + return 0 +} + +func (x *AppendEntriesRequest) GetEntries() []*LogEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *AppendEntriesRequest) GetLeaderCommit() int64 { + if x != nil { + return x.LeaderCommit + } + return 0 +} + +type LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Command *Command `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` +} + +func (x *LogEntry) Reset() { + *x = LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogEntry) ProtoMessage() {} + +func (x *LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. +func (*LogEntry) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{7} +} + +func (x *LogEntry) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *LogEntry) GetCommand() *Command { + if x != nil { + return x.Command + } + return nil +} + +type Command struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"` + Args []string `protobuf:"bytes,2,rep,name=Args,proto3" json:"Args,omitempty"` +} + +func (x *Command) Reset() { + *x = Command{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Command) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Command) ProtoMessage() {} + +func (x *Command) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Command.ProtoReflect.Descriptor instead. 
+func (*Command) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{8} +} + +func (x *Command) GetCommand() string { + if x != nil { + return x.Command + } + return "" +} + +func (x *Command) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +type AppendEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + ConflictIndex int64 `protobuf:"varint,3,opt,name=conflictIndex,proto3" json:"conflictIndex,omitempty"` + ConflictTerm int64 `protobuf:"varint,4,opt,name=conflictTerm,proto3" json:"conflictTerm,omitempty"` +} + +func (x *AppendEntriesResponse) Reset() { + *x = AppendEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendEntriesResponse) ProtoMessage() {} + +func (x *AppendEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendEntriesResponse.ProtoReflect.Descriptor instead. +func (*AppendEntriesResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{9} +} + +func (x *AppendEntriesResponse) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *AppendEntriesResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *AppendEntriesResponse) GetConflictIndex() int64 { + if x != nil { + return x.ConflictIndex + } + return 0 +} + +func (x *AppendEntriesResponse) GetConflictTerm() int64 { + if x != nil { + return x.ConflictTerm + } + return 0 +} + +var File_raft_proto protoreflect.FileDescriptor + +var file_raft_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x72, 0x61, + 0x66, 0x74, 0x22, 0x3c, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x0d, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x26, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x90, 0x01, 0x0a, + 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x74, 0x65, 
0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x64, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x61, + 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x73, + 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, + 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x22, + 0x4b, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x76, 0x6f, + 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x76, 0x6f, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x22, 0xd6, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, + 0x54, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x76, + 0x4c, 0x6f, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x12, 0x28, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, + 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x22, 0x47, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x27, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x37, + 0x0a, 0x07, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x41, 0x72, 0x67, 0x73, 0x22, 0x8f, 0x01, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x18, 0x0a, 0x07, 
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x24, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, + 0x74, 0x54, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x54, 0x65, 0x72, 0x6d, 0x32, 0x6b, 0x0a, 0x0d, 0x52, 0x61, 0x66, + 0x74, 0x4b, 0x56, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x03, 0x53, 0x65, + 0x74, 0x12, 0x10, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x2c, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, + 0x10, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x9f, 0x01, 0x0a, 0x0b, 0x52, 0x61, 0x66, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0d, + 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x2e, + 0x72, 0x61, 0x66, 0x74, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x72, 0x61, 0x66, 0x74, + 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2f, 0x72, 0x61, 0x66, + 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_raft_proto_rawDescOnce sync.Once + file_raft_proto_rawDescData = file_raft_proto_rawDesc +) + +func file_raft_proto_rawDescGZIP() []byte { + file_raft_proto_rawDescOnce.Do(func() { + file_raft_proto_rawDescData = protoimpl.X.CompressGZIP(file_raft_proto_rawDescData) + }) + return file_raft_proto_rawDescData +} + +var file_raft_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_raft_proto_goTypes = []interface{}{ + (*SetRequest)(nil), // 0: raft.SetRequest + (*SetResponse)(nil), // 1: raft.SetResponse + (*GetRequest)(nil), // 2: raft.GetRequest + (*GetResponse)(nil), // 3: raft.GetResponse + (*RequestVoteRequest)(nil), // 4: raft.RequestVoteRequest + (*RequestVoteResponse)(nil), // 5: raft.RequestVoteResponse + (*AppendEntriesRequest)(nil), // 6: raft.AppendEntriesRequest + (*LogEntry)(nil), // 7: raft.LogEntry + (*Command)(nil), // 8: raft.Command + (*AppendEntriesResponse)(nil), // 9: raft.AppendEntriesResponse +} +var file_raft_proto_depIdxs = []int32{ + 7, // 0: raft.AppendEntriesRequest.entries:type_name -> raft.LogEntry + 8, // 1: raft.LogEntry.command:type_name -> raft.Command + 0, // 2: 
raft.RaftKVService.Set:input_type -> raft.SetRequest + 2, // 3: raft.RaftKVService.Get:input_type -> raft.GetRequest + 4, // 4: raft.RaftService.RequestVote:input_type -> raft.RequestVoteRequest + 6, // 5: raft.RaftService.AppendEntries:input_type -> raft.AppendEntriesRequest + 1, // 6: raft.RaftKVService.Set:output_type -> raft.SetResponse + 3, // 7: raft.RaftKVService.Get:output_type -> raft.GetResponse + 5, // 8: raft.RaftService.RequestVote:output_type -> raft.RequestVoteResponse + 9, // 9: raft.RaftService.AppendEntries:output_type -> raft.AppendEntriesResponse + 6, // [6:10] is the sub-list for method output_type + 2, // [2:6] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_raft_proto_init() } +func file_raft_proto_init() { + if File_raft_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_raft_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestVoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestVoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Command); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_raft_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_raft_proto_goTypes, + DependencyIndexes: file_raft_proto_depIdxs, + MessageInfos: file_raft_proto_msgTypes, + }.Build() + File_raft_proto = out.File + file_raft_proto_rawDesc = nil + file_raft_proto_goTypes = nil + file_raft_proto_depIdxs = nil +} diff --git a/projects/raft-otel/raft.proto b/projects/raft-otel/raft.proto new file mode 100644 index 000000000..c9a89abb9 --- /dev/null +++ b/projects/raft-otel/raft.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; +package raft; +option go_package = "/raft_proto"; + +service RaftKVService { + rpc Set(SetRequest) returns (SetResponse) {} + rpc Get(GetRequest) returns (GetResponse) {} +} + +message SetRequest { + string keyname = 1; + string value = 2; +} + +message SetResponse { +} + +message GetRequest { + string keyname = 1; +} + +message GetResponse { + string value = 1; +} + +service RaftService { + rpc RequestVote(RequestVoteRequest) returns (RequestVoteResponse) {} + rpc AppendEntries(AppendEntriesRequest) returns (AppendEntriesResponse) {} +} + +message RequestVoteRequest { + int64 term = 1; + string candidateId = 2; + int64 lastLogIndex = 3; + int64 lastLogTerm = 4; +} + +message RequestVoteResponse { + int64 term = 1; + bool voteGranted = 2; +} + +message AppendEntriesRequest { + int64 term = 1; + string leader = 2; + int64 prevLogIndex = 3; + int64 prevLogTerm = 4; + repeated LogEntry entries = 5; + int64 leaderCommit = 6; +} + + +message LogEntry { + int64 term = 1; + Command command = 2; +} + +message Command { + string Command = 1; + repeated string Args = 2; +} + + +message AppendEntriesResponse { + int64 term = 1; + bool success = 2; + int64 conflictIndex = 3; + int64 conflictTerm = 4; +} + diff --git a/projects/raft-otel/raft_grpc.pb.go b/projects/raft-otel/raft_grpc.pb.go new file mode 100644 index 000000000..d4299e885 --- /dev/null +++ b/projects/raft-otel/raft_grpc.pb.go @@ -0,0 +1,263 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v4.24.4 +// source: raft.proto + +package raft_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// RaftKVServiceClient is the client API for RaftKVService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RaftKVServiceClient interface { + Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) +} + +type raftKVServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRaftKVServiceClient(cc grpc.ClientConnInterface) RaftKVServiceClient { + return &raftKVServiceClient{cc} +} + +func (c *raftKVServiceClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) { + out := new(SetResponse) + err := c.cc.Invoke(ctx, "/raft.RaftKVService/Set", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftKVServiceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/raft.RaftKVService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RaftKVServiceServer is the server API for RaftKVService service. +// All implementations must embed UnimplementedRaftKVServiceServer +// for forward compatibility +type RaftKVServiceServer interface { + Set(context.Context, *SetRequest) (*SetResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + mustEmbedUnimplementedRaftKVServiceServer() +} + +// UnimplementedRaftKVServiceServer must be embedded to have forward compatible implementations. +type UnimplementedRaftKVServiceServer struct { +} + +func (UnimplementedRaftKVServiceServer) Set(context.Context, *SetRequest) (*SetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (UnimplementedRaftKVServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedRaftKVServiceServer) mustEmbedUnimplementedRaftKVServiceServer() {} + +// UnsafeRaftKVServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RaftKVServiceServer will +// result in compilation errors. +type UnsafeRaftKVServiceServer interface { + mustEmbedUnimplementedRaftKVServiceServer() +} + +func RegisterRaftKVServiceServer(s grpc.ServiceRegistrar, srv RaftKVServiceServer) { + s.RegisterService(&RaftKVService_ServiceDesc, srv) +} + +func _RaftKVService_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftKVServiceServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftKVService/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftKVServiceServer).Set(ctx, req.(*SetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftKVService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftKVServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftKVService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftKVServiceServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RaftKVService_ServiceDesc is the grpc.ServiceDesc for RaftKVService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RaftKVService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "raft.RaftKVService", + HandlerType: (*RaftKVServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _RaftKVService_Set_Handler, + }, + { + MethodName: "Get", + Handler: _RaftKVService_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "raft.proto", +} + +// RaftServiceClient is the client API for RaftService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RaftServiceClient interface { + RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) + AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) +} + +type raftServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRaftServiceClient(cc grpc.ClientConnInterface) RaftServiceClient { + return &raftServiceClient{cc} +} + +func (c *raftServiceClient) RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) { + out := new(RequestVoteResponse) + err := c.cc.Invoke(ctx, "/raft.RaftService/RequestVote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftServiceClient) AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) { + out := new(AppendEntriesResponse) + err := c.cc.Invoke(ctx, "/raft.RaftService/AppendEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RaftServiceServer is the server API for RaftService service. +// All implementations must embed UnimplementedRaftServiceServer +// for forward compatibility +type RaftServiceServer interface { + RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) + AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) + mustEmbedUnimplementedRaftServiceServer() +} + +// UnimplementedRaftServiceServer must be embedded to have forward compatible implementations. +type UnimplementedRaftServiceServer struct { +} + +func (UnimplementedRaftServiceServer) RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestVote not implemented") +} +func (UnimplementedRaftServiceServer) AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppendEntries not implemented") +} +func (UnimplementedRaftServiceServer) mustEmbedUnimplementedRaftServiceServer() {} + +// UnsafeRaftServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RaftServiceServer will +// result in compilation errors. 
+type UnsafeRaftServiceServer interface { + mustEmbedUnimplementedRaftServiceServer() +} + +func RegisterRaftServiceServer(s grpc.ServiceRegistrar, srv RaftServiceServer) { + s.RegisterService(&RaftService_ServiceDesc, srv) +} + +func _RaftService_RequestVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestVoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServiceServer).RequestVote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftService/RequestVote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServiceServer).RequestVote(ctx, req.(*RequestVoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftService_AppendEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AppendEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServiceServer).AppendEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftService/AppendEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServiceServer).AppendEntries(ctx, req.(*AppendEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RaftService_ServiceDesc is the grpc.ServiceDesc for RaftService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RaftService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "raft.RaftService", + HandlerType: (*RaftServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RequestVote", + Handler: _RaftService_RequestVote_Handler, + }, + { + MethodName: "AppendEntries", + Handler: _RaftService_AppendEntries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "raft.proto", +} diff --git a/projects/raft-otel/raft_proto/raft.pb.go b/projects/raft-otel/raft_proto/raft.pb.go new file mode 100644 index 000000000..38eec3b6d --- /dev/null +++ b/projects/raft-otel/raft_proto/raft.pb.go @@ -0,0 +1,871 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.24.4 +// source: raft.proto + +package raft_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyname string `protobuf:"bytes,1,opt,name=keyname,proto3" json:"keyname,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *SetRequest) Reset() { + *x = SetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRequest) ProtoMessage() {} + +func (x *SetRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRequest.ProtoReflect.Descriptor instead. +func (*SetRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{0} +} + +func (x *SetRequest) GetKeyname() string { + if x != nil { + return x.Keyname + } + return "" +} + +func (x *SetRequest) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type SetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetResponse) Reset() { + *x = SetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetResponse) ProtoMessage() {} + +func (x *SetResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetResponse.ProtoReflect.Descriptor instead. +func (*SetResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{1} +} + +type GetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyname string `protobuf:"bytes,1,opt,name=keyname,proto3" json:"keyname,omitempty"` +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
+func (*GetRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{2} +} + +func (x *GetRequest) GetKeyname() string { + if x != nil { + return x.Keyname + } + return "" +} + +type GetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. +func (*GetResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{3} +} + +func (x *GetResponse) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type RequestVoteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + CandidateId string `protobuf:"bytes,2,opt,name=candidateId,proto3" json:"candidateId,omitempty"` + LastLogIndex int64 `protobuf:"varint,3,opt,name=lastLogIndex,proto3" json:"lastLogIndex,omitempty"` + LastLogTerm int64 `protobuf:"varint,4,opt,name=lastLogTerm,proto3" json:"lastLogTerm,omitempty"` +} + +func (x *RequestVoteRequest) Reset() { + *x = RequestVoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestVoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestVoteRequest) ProtoMessage() {} + +func (x *RequestVoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestVoteRequest.ProtoReflect.Descriptor instead. 
+func (*RequestVoteRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{4} +} + +func (x *RequestVoteRequest) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *RequestVoteRequest) GetCandidateId() string { + if x != nil { + return x.CandidateId + } + return "" +} + +func (x *RequestVoteRequest) GetLastLogIndex() int64 { + if x != nil { + return x.LastLogIndex + } + return 0 +} + +func (x *RequestVoteRequest) GetLastLogTerm() int64 { + if x != nil { + return x.LastLogTerm + } + return 0 +} + +type RequestVoteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + VoteGranted bool `protobuf:"varint,2,opt,name=voteGranted,proto3" json:"voteGranted,omitempty"` +} + +func (x *RequestVoteResponse) Reset() { + *x = RequestVoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestVoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestVoteResponse) ProtoMessage() {} + +func (x *RequestVoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestVoteResponse.ProtoReflect.Descriptor instead. +func (*RequestVoteResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{5} +} + +func (x *RequestVoteResponse) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *RequestVoteResponse) GetVoteGranted() bool { + if x != nil { + return x.VoteGranted + } + return false +} + +type AppendEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + PrevLogIndex int64 `protobuf:"varint,3,opt,name=prevLogIndex,proto3" json:"prevLogIndex,omitempty"` + PrevLogTerm int64 `protobuf:"varint,4,opt,name=prevLogTerm,proto3" json:"prevLogTerm,omitempty"` + Entries []*LogEntry `protobuf:"bytes,5,rep,name=entries,proto3" json:"entries,omitempty"` + LeaderCommit int64 `protobuf:"varint,6,opt,name=leaderCommit,proto3" json:"leaderCommit,omitempty"` +} + +func (x *AppendEntriesRequest) Reset() { + *x = AppendEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendEntriesRequest) ProtoMessage() {} + +func (x *AppendEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendEntriesRequest.ProtoReflect.Descriptor instead. 
+func (*AppendEntriesRequest) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{6} +} + +func (x *AppendEntriesRequest) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *AppendEntriesRequest) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +func (x *AppendEntriesRequest) GetPrevLogIndex() int64 { + if x != nil { + return x.PrevLogIndex + } + return 0 +} + +func (x *AppendEntriesRequest) GetPrevLogTerm() int64 { + if x != nil { + return x.PrevLogTerm + } + return 0 +} + +func (x *AppendEntriesRequest) GetEntries() []*LogEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *AppendEntriesRequest) GetLeaderCommit() int64 { + if x != nil { + return x.LeaderCommit + } + return 0 +} + +type LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Command *Command `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` +} + +func (x *LogEntry) Reset() { + *x = LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogEntry) ProtoMessage() {} + +func (x *LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. +func (*LogEntry) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{7} +} + +func (x *LogEntry) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *LogEntry) GetCommand() *Command { + if x != nil { + return x.Command + } + return nil +} + +type Command struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"Command,omitempty"` + Args []string `protobuf:"bytes,2,rep,name=Args,proto3" json:"Args,omitempty"` +} + +func (x *Command) Reset() { + *x = Command{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Command) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Command) ProtoMessage() {} + +func (x *Command) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Command.ProtoReflect.Descriptor instead. 
+func (*Command) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{8} +} + +func (x *Command) GetCommand() string { + if x != nil { + return x.Command + } + return "" +} + +func (x *Command) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +type AppendEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term int64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + ConflictIndex int64 `protobuf:"varint,3,opt,name=conflictIndex,proto3" json:"conflictIndex,omitempty"` + ConflictTerm int64 `protobuf:"varint,4,opt,name=conflictTerm,proto3" json:"conflictTerm,omitempty"` +} + +func (x *AppendEntriesResponse) Reset() { + *x = AppendEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_raft_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendEntriesResponse) ProtoMessage() {} + +func (x *AppendEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_raft_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendEntriesResponse.ProtoReflect.Descriptor instead. +func (*AppendEntriesResponse) Descriptor() ([]byte, []int) { + return file_raft_proto_rawDescGZIP(), []int{9} +} + +func (x *AppendEntriesResponse) GetTerm() int64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *AppendEntriesResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *AppendEntriesResponse) GetConflictIndex() int64 { + if x != nil { + return x.ConflictIndex + } + return 0 +} + +func (x *AppendEntriesResponse) GetConflictTerm() int64 { + if x != nil { + return x.ConflictTerm + } + return 0 +} + +var File_raft_proto protoreflect.FileDescriptor + +var file_raft_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x72, 0x61, + 0x66, 0x74, 0x22, 0x3c, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x0d, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x26, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6b, 0x65, 0x79, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x90, 0x01, 0x0a, + 0x12, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x74, 0x65, 
0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x64, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x61, + 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x73, + 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, + 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x4c, 0x6f, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x22, + 0x4b, 0x0a, 0x13, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x76, 0x6f, + 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x76, 0x6f, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x22, 0xd6, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x76, 0x4c, 0x6f, 0x67, + 0x54, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x76, + 0x4c, 0x6f, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x12, 0x28, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, + 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x22, 0x47, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x27, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x37, + 0x0a, 0x07, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x41, 0x72, 0x67, 0x73, 0x22, 0x8f, 0x01, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, + 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x18, 0x0a, 0x07, 
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x24, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, + 0x74, 0x54, 0x65, 0x72, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x54, 0x65, 0x72, 0x6d, 0x32, 0x6b, 0x0a, 0x0d, 0x52, 0x61, 0x66, + 0x74, 0x4b, 0x56, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x03, 0x53, 0x65, + 0x74, 0x12, 0x10, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x2c, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, + 0x10, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x11, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x9f, 0x01, 0x0a, 0x0b, 0x52, 0x61, 0x66, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x56, 0x6f, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0d, + 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x2e, + 0x72, 0x61, 0x66, 0x74, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x72, 0x61, 0x66, 0x74, + 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2f, 0x72, 0x61, 0x66, + 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_raft_proto_rawDescOnce sync.Once + file_raft_proto_rawDescData = file_raft_proto_rawDesc +) + +func file_raft_proto_rawDescGZIP() []byte { + file_raft_proto_rawDescOnce.Do(func() { + file_raft_proto_rawDescData = protoimpl.X.CompressGZIP(file_raft_proto_rawDescData) + }) + return file_raft_proto_rawDescData +} + +var file_raft_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_raft_proto_goTypes = []interface{}{ + (*SetRequest)(nil), // 0: raft.SetRequest + (*SetResponse)(nil), // 1: raft.SetResponse + (*GetRequest)(nil), // 2: raft.GetRequest + (*GetResponse)(nil), // 3: raft.GetResponse + (*RequestVoteRequest)(nil), // 4: raft.RequestVoteRequest + (*RequestVoteResponse)(nil), // 5: raft.RequestVoteResponse + (*AppendEntriesRequest)(nil), // 6: raft.AppendEntriesRequest + (*LogEntry)(nil), // 7: raft.LogEntry + (*Command)(nil), // 8: raft.Command + (*AppendEntriesResponse)(nil), // 9: raft.AppendEntriesResponse +} +var file_raft_proto_depIdxs = []int32{ + 7, // 0: raft.AppendEntriesRequest.entries:type_name -> raft.LogEntry + 8, // 1: raft.LogEntry.command:type_name -> raft.Command + 0, // 2: 
raft.RaftKVService.Set:input_type -> raft.SetRequest + 2, // 3: raft.RaftKVService.Get:input_type -> raft.GetRequest + 4, // 4: raft.RaftService.RequestVote:input_type -> raft.RequestVoteRequest + 6, // 5: raft.RaftService.AppendEntries:input_type -> raft.AppendEntriesRequest + 1, // 6: raft.RaftKVService.Set:output_type -> raft.SetResponse + 3, // 7: raft.RaftKVService.Get:output_type -> raft.GetResponse + 5, // 8: raft.RaftService.RequestVote:output_type -> raft.RequestVoteResponse + 9, // 9: raft.RaftService.AppendEntries:output_type -> raft.AppendEntriesResponse + 6, // [6:10] is the sub-list for method output_type + 2, // [2:6] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_raft_proto_init() } +func file_raft_proto_init() { + if File_raft_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_raft_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestVoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestVoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Command); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_raft_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_raft_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_raft_proto_goTypes, + DependencyIndexes: file_raft_proto_depIdxs, + MessageInfos: file_raft_proto_msgTypes, + }.Build() + File_raft_proto = out.File + file_raft_proto_rawDesc = nil + file_raft_proto_goTypes = nil + file_raft_proto_depIdxs = nil +} diff --git a/projects/raft-otel/raft_proto/raft_grpc.pb.go b/projects/raft-otel/raft_proto/raft_grpc.pb.go new file mode 100644 index 000000000..d4299e885 --- /dev/null +++ b/projects/raft-otel/raft_proto/raft_grpc.pb.go @@ -0,0 +1,263 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v4.24.4 +// source: raft.proto + +package raft_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// RaftKVServiceClient is the client API for RaftKVService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RaftKVServiceClient interface { + Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) +} + +type raftKVServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRaftKVServiceClient(cc grpc.ClientConnInterface) RaftKVServiceClient { + return &raftKVServiceClient{cc} +} + +func (c *raftKVServiceClient) Set(ctx context.Context, in *SetRequest, opts ...grpc.CallOption) (*SetResponse, error) { + out := new(SetResponse) + err := c.cc.Invoke(ctx, "/raft.RaftKVService/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftKVServiceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/raft.RaftKVService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RaftKVServiceServer is the server API for RaftKVService service. +// All implementations must embed UnimplementedRaftKVServiceServer +// for forward compatibility +type RaftKVServiceServer interface { + Set(context.Context, *SetRequest) (*SetResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + mustEmbedUnimplementedRaftKVServiceServer() +} + +// UnimplementedRaftKVServiceServer must be embedded to have forward compatible implementations. +type UnimplementedRaftKVServiceServer struct { +} + +func (UnimplementedRaftKVServiceServer) Set(context.Context, *SetRequest) (*SetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} +func (UnimplementedRaftKVServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedRaftKVServiceServer) mustEmbedUnimplementedRaftKVServiceServer() {} + +// UnsafeRaftKVServiceServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to RaftKVServiceServer will +// result in compilation errors. +type UnsafeRaftKVServiceServer interface { + mustEmbedUnimplementedRaftKVServiceServer() +} + +func RegisterRaftKVServiceServer(s grpc.ServiceRegistrar, srv RaftKVServiceServer) { + s.RegisterService(&RaftKVService_ServiceDesc, srv) +} + +func _RaftKVService_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftKVServiceServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftKVService/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftKVServiceServer).Set(ctx, req.(*SetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftKVService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftKVServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftKVService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftKVServiceServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RaftKVService_ServiceDesc is the grpc.ServiceDesc for RaftKVService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RaftKVService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "raft.RaftKVService", + HandlerType: (*RaftKVServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _RaftKVService_Set_Handler, + }, + { + MethodName: "Get", + Handler: _RaftKVService_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "raft.proto", +} + +// RaftServiceClient is the client API for RaftService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RaftServiceClient interface { + RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) + AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) +} + +type raftServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRaftServiceClient(cc grpc.ClientConnInterface) RaftServiceClient { + return &raftServiceClient{cc} +} + +func (c *raftServiceClient) RequestVote(ctx context.Context, in *RequestVoteRequest, opts ...grpc.CallOption) (*RequestVoteResponse, error) { + out := new(RequestVoteResponse) + err := c.cc.Invoke(ctx, "/raft.RaftService/RequestVote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftServiceClient) AppendEntries(ctx context.Context, in *AppendEntriesRequest, opts ...grpc.CallOption) (*AppendEntriesResponse, error) { + out := new(AppendEntriesResponse) + err := c.cc.Invoke(ctx, "/raft.RaftService/AppendEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RaftServiceServer is the server API for RaftService service. 
+// All implementations must embed UnimplementedRaftServiceServer +// for forward compatibility +type RaftServiceServer interface { + RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) + AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) + mustEmbedUnimplementedRaftServiceServer() +} + +// UnimplementedRaftServiceServer must be embedded to have forward compatible implementations. +type UnimplementedRaftServiceServer struct { +} + +func (UnimplementedRaftServiceServer) RequestVote(context.Context, *RequestVoteRequest) (*RequestVoteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestVote not implemented") +} +func (UnimplementedRaftServiceServer) AppendEntries(context.Context, *AppendEntriesRequest) (*AppendEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppendEntries not implemented") +} +func (UnimplementedRaftServiceServer) mustEmbedUnimplementedRaftServiceServer() {} + +// UnsafeRaftServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RaftServiceServer will +// result in compilation errors. +type UnsafeRaftServiceServer interface { + mustEmbedUnimplementedRaftServiceServer() +} + +func RegisterRaftServiceServer(s grpc.ServiceRegistrar, srv RaftServiceServer) { + s.RegisterService(&RaftService_ServiceDesc, srv) +} + +func _RaftService_RequestVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestVoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServiceServer).RequestVote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftService/RequestVote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServiceServer).RequestVote(ctx, req.(*RequestVoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftService_AppendEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AppendEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServiceServer).AppendEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/raft.RaftService/AppendEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServiceServer).AppendEntries(ctx, req.(*AppendEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RaftService_ServiceDesc is the grpc.ServiceDesc for RaftService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RaftService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "raft.RaftService", + HandlerType: (*RaftServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RequestVote", + Handler: _RaftService_RequestVote_Handler, + }, + { + MethodName: "AppendEntries", + Handler: _RaftService_AppendEntries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "raft.proto", +} diff --git a/projects/raft-otel/raft_test.go b/projects/raft-otel/raft_test.go new file mode 100644 index 000000000..8bd04a9db --- /dev/null +++ b/projects/raft-otel/raft_test.go @@ -0,0 +1,565 @@ +// Eli Bendersky [https://eli.thegreenplace.net] +// This code is in the public domain. +package raft + +import ( + "testing" + "time" + + "github.com/fortytw2/leaktest" +) + +func TestElectionBasic(t *testing.T) { + h := NewHarness(t, 3) + defer h.Shutdown() + + h.CheckSingleLeader() +} + +func TestElectionLeaderDisconnect(t *testing.T) { + h := NewHarness(t, 3) + defer h.Shutdown() + + origLeaderId, origTerm := h.CheckSingleLeader() + + h.DisconnectPeer(origLeaderId) + sleepMs(350) + + newLeaderId, newTerm := h.CheckSingleLeader() + if newLeaderId == origLeaderId { + t.Errorf("want new leader to be different from orig leader") + } + if newTerm <= origTerm { + t.Errorf("want newTerm > origTerm, got %d and %d", newTerm, origTerm) + } +} + +func TestElectionLeaderAndAnotherDisconnect(t *testing.T) { + h := NewHarness(t, 3) + defer h.Shutdown() + + origLeaderId, _ := h.CheckSingleLeader() + + h.DisconnectPeer(origLeaderId) + otherId := (toInt(origLeaderId) + 1) % 3 + h.DisconnectPeer(str(otherId)) + + // No quorum. + sleepMs(450) + h.CheckNoLeader() + + // Reconnect one other server; now we'll have quorum. + h.ReconnectPeer(str(otherId)) + h.CheckSingleLeader() +} + +func TestDisconnectAllThenRestore(t *testing.T) { + h := NewHarness(t, 3) + defer h.Shutdown() + + sleepMs(100) + // Disconnect all servers from the start. There will be no leader. + for i := 0; i < 3; i++ { + h.DisconnectPeer(str(i)) + } + sleepMs(450) + h.CheckNoLeader() + + // Reconnect all servers. A leader will be found.
+ for i := 0; i < 3; i++ { + h.ReconnectPeer(str(i)) + } + h.CheckSingleLeader() +} + +func TestElectionLeaderDisconnectThenReconnect(t *testing.T) { + h := NewHarness(t, 3) + defer h.Shutdown() + origLeaderId, _ := h.CheckSingleLeader() + + h.DisconnectPeer(origLeaderId) + + sleepMs(350) + newLeaderId, newTerm := h.CheckSingleLeader() + + h.ReconnectPeer(origLeaderId) + sleepMs(150) + + againLeaderId, againTerm := h.CheckSingleLeader() + + if newLeaderId != againLeaderId { + t.Errorf("again leader id got %s; want %s", againLeaderId, newLeaderId) + } + if againTerm != newTerm { + t.Errorf("again term got %d; want %d", againTerm, newTerm) + } +} + +func TestElectionLeaderDisconnectThenReconnect5(t *testing.T) { + defer leaktest.CheckTimeout(t, 100*time.Millisecond)() + + h := NewHarness(t, 5) + defer h.Shutdown() + + origLeaderId, _ := h.CheckSingleLeader() + + h.DisconnectPeer(origLeaderId) + sleepMs(150) + newLeaderId, newTerm := h.CheckSingleLeader() + + h.ReconnectPeer(origLeaderId) + sleepMs(150) + + againLeaderId, againTerm := h.CheckSingleLeader() + + if newLeaderId != againLeaderId { + t.Errorf("again leader id got %s; want %s", againLeaderId, newLeaderId) + } + if againTerm != newTerm { + t.Errorf("again term got %d; want %d", againTerm, newTerm) + } +} + +func TestElectionFollowerComesBack(t *testing.T) { + defer leaktest.CheckTimeout(t, 100*time.Millisecond)() + + h := NewHarness(t, 3) + defer h.Shutdown() + + origLeaderId, origTerm := h.CheckSingleLeader() + + otherId := (toInt(origLeaderId) + 1) % 3 + h.DisconnectPeer(str(otherId)) + time.Sleep(650 * time.Millisecond) + h.ReconnectPeer(str(otherId)) + sleepMs(150) + + // We can't have an assertion on the new leader id here because it depends + // on the relative election timeouts. We can assert that the term changed, + // however, which implies that re-election has occurred. + _, newTerm := h.CheckSingleLeader() + if newTerm <= origTerm { + t.Errorf("newTerm=%d, origTerm=%d", newTerm, origTerm) + } +} + +func TestElectionDisconnectLoop(t *testing.T) { + defer leaktest.CheckTimeout(t, 100*time.Millisecond)() + + h := NewHarness(t, 3) + defer h.Shutdown() + + for cycle := 0; cycle < 5; cycle++ { + leaderId, _ := h.CheckSingleLeader() + + h.DisconnectPeer(leaderId) + otherId := (toInt(leaderId) + 1) % 3 + h.DisconnectPeer(str(otherId)) + sleepMs(310) + h.CheckNoLeader() + + // Reconnect both. 
+
+func TestCommitOneCommand(t *testing.T) {
+	h := NewHarness(t, 3)
+	defer h.Shutdown()
+
+	sleepMs(500)
+
+	origLeaderId, _ := h.CheckSingleLeader()
+
+	tlog("submitting 42 to %s", origLeaderId)
+	isLeader := h.SubmitToServer(origLeaderId, 42)
+	if !isLeader {
+		t.Errorf("want id=%s leader, but it's not", origLeaderId)
+	}
+
+	sleepMs(500)
+	h.CheckCommittedN(42, 3)
+}
+
+func TestSubmitNonLeaderFails(t *testing.T) {
+	h := NewHarness(t, 3)
+	defer h.Shutdown()
+
+	origLeaderId, _ := h.CheckSingleLeader()
+	sid := (toInt(origLeaderId) + 1) % 3
+	tlog("submitting 42 to %d", sid)
+	isLeader := h.SubmitToServer(str(sid), 42)
+	if isLeader {
+		t.Errorf("want id=%d !leader, but it is", sid)
+	}
+	sleepMs(10)
+}
+
+func TestCommitMultipleCommands(t *testing.T) {
+	h := NewHarness(t, 3)
+	defer h.Shutdown()
+
+	origLeaderId, _ := h.CheckSingleLeader()
+
+	values := []int{42, 55, 81}
+	for _, v := range values {
+		tlog("submitting %d to %s", v, origLeaderId)
+		isLeader := h.SubmitToServer(origLeaderId, v)
+		if !isLeader {
+			t.Errorf("want id=%s leader, but it's not", origLeaderId)
+		}
+		sleepMs(100)
+	}
+
+	sleepMs(250)
+	nc, i1 := h.CheckCommitted(42)
+	_, i2 := h.CheckCommitted(55)
+	if nc != 3 {
+		t.Errorf("want nc=3, got %d", nc)
+	}
+	if i1 >= i2 {
+		t.Errorf("want i1<i2, got i1=%d i2=%d", i1, i2)
+	}
+
+	_, i3 := h.CheckCommitted(81)
+	if i2 >= i3 {
+		t.Errorf("want i2<i3, got i2=%d i3=%d", i2, i3)
+	}
+}
diff --git a/projects/raft-otel/testharness.go b/projects/raft-otel/testharness.go
new file mode 100644
index 000000000..0622833bc
--- /dev/null
+++ b/projects/raft-otel/testharness.go
@@ -0,0 +1,414 @@
+// Test harness for writing tests for Raft.
+//
+// Eli Bendersky [https://eli.thegreenplace.net]
+// This code is in the public domain.
+package raft
+
+import (
+	"fmt"
+	"log"
+	"math/rand"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+)
+
+func init() {
+	log.SetFlags(log.Ltime | log.Lmicroseconds)
+	seed := time.Now().UnixNano()
+	fmt.Println("seed", seed)
+	rand.Seed(seed)
+}
+
+type Harness struct {
+	mu sync.Mutex
+
+	// cluster holds all the raft servers participating in a cluster, keyed by
+	// server id.
+	cluster map[string]*Server
+	storage map[string]*MapStorage
+
+	// commitChans has a channel per server in cluster with the commit channel
+	// for that server.
+	commitChans map[string]chan CommitEntry
+
+	// commits[id] holds the sequence of commits made by server id so far. It is
+	// populated by goroutines that listen on the corresponding commitChans
+	// channel.
+	commits map[string][]CommitEntry
+
+	// connected has a bool per server in cluster, specifying whether this server
+	// is currently connected to peers (if false, it's partitioned and no messages
+	// will pass to or from it).
+	connected map[string]bool
+
+	// alive has a bool per server in cluster, specifying whether this server is
+	// currently alive (false means it has crashed and wasn't restarted yet).
+	// connected implies alive.
+	alive map[string]bool
+
+	n int
+	t *testing.T
+}
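+
+// Editor's addition (illustrative, not part of the original harness): the
+// fields above maintain the invariant that connected implies alive. A small
+// checker for that invariant, using only the fields defined here:
+func (h *Harness) checkConnectedImpliesAlive() {
+	for id, conn := range h.connected {
+		if conn && !h.alive[id] {
+			h.t.Fatalf("server %s is connected but not alive", id)
+		}
+	}
+}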
+
+// NewHarness creates a new test Harness, initialized with n servers connected
+// to each other.
+func NewHarness(t *testing.T, n int) *Harness {
+	ns := make(map[string]*Server)
+	connected := make(map[string]bool, n)
+	alive := make(map[string]bool, n)
+	commitChans := make(map[string]chan CommitEntry)
+	commits := make(map[string][]CommitEntry)
+	ready := make(chan interface{})
+	storage := make(map[string]*MapStorage)
+
+	// Create all Servers in this cluster, assign ids and peer ids.
+	for i := 0; i < n; i++ {
+		serverId := fmt.Sprintf("%d", i)
+		storage[serverId] = NewMapStorage()
+		commitChans[serverId] = make(chan CommitEntry)
+		ns[serverId] = NewServer(serverId, "127.0.0.1", storage[serverId], ready, commitChans[serverId], 7601+i)
+		ns[serverId].Serve(nil)
+		alive[serverId] = true
+	}
+
+	// Connect all peers to each other.
+	for i := 0; i < n; i++ {
+		for j := 0; j < n; j++ {
+			if i != j {
+				ns[fmt.Sprintf("%d", i)].ConnectToPeer(fmt.Sprintf("%d", j), fmt.Sprintf("127.0.0.1:%d", 7601+j))
+			}
+		}
+		connected[fmt.Sprintf("%d", i)] = true
+	}
+	close(ready)
+
+	h := &Harness{
+		cluster:     ns,
+		storage:     storage,
+		commitChans: commitChans,
+		commits:     commits,
+		connected:   connected,
+		alive:       alive,
+		n:           n,
+		t:           t,
+	}
+	for i := 0; i < n; i++ {
+		go h.collectCommits(i)
+	}
+	return h
+}
+
+// Shutdown shuts down all the servers in the harness and waits for them to
+// stop running.
+func (h *Harness) Shutdown() {
+	for i := 0; i < h.n; i++ {
+		h.cluster[str(i)].DisconnectAll()
+		h.connected[str(i)] = false
+	}
+	for i := 0; i < h.n; i++ {
+		if h.alive[str(i)] {
+			h.alive[str(i)] = false
+			h.cluster[str(i)].Shutdown()
+		}
+	}
+	for i := 0; i < h.n; i++ {
+		close(h.commitChans[str(i)])
+	}
+}
+
+// DisconnectPeer disconnects a server from all other servers in the cluster.
+func (h *Harness) DisconnectPeer(id string) {
+	tlog("Disconnect %s", id)
+	h.cluster[id].DisconnectAll()
+	for peer, server := range h.cluster {
+		if peer != id {
+			server.DisconnectPeer(id)
+		}
+	}
+	h.connected[id] = false
+}
+
+// ReconnectPeer connects a server to all other servers in the cluster.
+func (h *Harness) ReconnectPeer(id string) {
+	tlog("Reconnect %s", id)
+	for peerId, peerServer := range h.cluster {
+		if peerId != id && h.alive[peerId] {
+			if err := peerServer.ConnectToPeer(id, fmt.Sprintf("127.0.0.1:%d", 7601+toInt(id))); err != nil {
+				h.t.Fatal(err)
+			}
+			if err := h.cluster[id].ConnectToPeer(peerId, fmt.Sprintf("127.0.0.1:%d", 7601+toInt(peerId))); err != nil {
+				h.t.Fatal(err)
+			}
+		}
+	}
+	h.connected[id] = true
+}
+
+// CrashPeer "crashes" a server by disconnecting it from all peers and then
+// asking it to shut down. We're not going to use the same server instance
+// again, but its storage is retained.
+func (h *Harness) CrashPeer(id string) {
+	tlog("Crash %s", id)
+	h.DisconnectPeer(id)
+	h.alive[id] = false
+	h.cluster[id].Shutdown()
+
+	// Clear out the commits slice for the crashed server; Raft assumes the client
+	// has no persistent state. Once this server comes back online it will replay
+	// the whole log to us.
+	h.mu.Lock()
+	h.commits[id] = h.commits[id][:0]
+	h.mu.Unlock()
+}
+
+// RestartPeer "restarts" a server by creating a new Server instance and giving
+// it the appropriate storage, reconnecting it to peers.
+func (h *Harness) RestartPeer(id string) {
+	if h.alive[id] {
+		log.Fatalf("id=%s is alive in RestartPeer", id)
+	}
+	tlog("Restart %s", id)
+
+	ready := make(chan interface{})
+	portOffset, _ := strconv.Atoi(id) // ids are harness-generated ints, so ignoring the error is acceptable in test code
+
+	h.cluster[id] = NewServer(id, "127.0.0.1", h.storage[id], ready, h.commitChans[id], 7601+portOffset)
+	h.cluster[id].Serve(nil)
+	h.ReconnectPeer(id)
+	close(ready)
+	h.alive[id] = true
+	sleepMs(20)
+}
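+
+// Editor's note (hypothetical usage sketch, not from the original diff):
+// CrashPeer and RestartPeer above are designed to be used together, e.g.:
+//
+//	h := NewHarness(t, 3)
+//	defer h.Shutdown()
+//	leaderId, _ := h.CheckSingleLeader()
+//	h.CrashPeer(leaderId)
+//	sleepMs(350)
+//	h.CheckSingleLeader()   // the remaining majority elects a new leader
+//	h.RestartPeer(leaderId) // rejoins with its MapStorage contents intact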
+
+// CheckSingleLeader checks that only a single server thinks it's the leader.
+// Returns the leader's id and term. It retries several times if no leader is
+// identified yet.
+func (h *Harness) CheckSingleLeader() (string, int) {
+	for r := 0; r < 8; r++ {
+		leaderId := -1
+		leaderTerm := -1
+		for i := 0; i < h.n; i++ {
+			if h.connected[str(i)] {
+				_, term, isLeader := h.cluster[str(i)].cm.Report()
+				if isLeader {
+					if leaderId < 0 {
+						leaderId = i
+						leaderTerm = term
+					} else {
+						h.t.Fatalf("both %d and %d think they're leaders", leaderId, i)
+					}
+				}
+			}
+		}
+		if leaderId >= 0 {
+			return str(leaderId), leaderTerm
+		}
+		time.Sleep(150 * time.Millisecond)
+	}
+
+	h.t.Fatalf("leader not found")
+	return "", -1
+}
+
+// CheckNoLeader checks that no connected server considers itself the leader.
+func (h *Harness) CheckNoLeader() {
+	for i := 0; i < h.n; i++ {
+		if h.connected[str(i)] {
+			_, _, isLeader := h.cluster[str(i)].cm.Report()
+			if isLeader {
+				h.t.Fatalf("server %d leader; want none", i)
+			}
+		}
+	}
+}
+
+func (h *Harness) printCommits() {
+	fmt.Printf("Printing commits for connected servers\n")
+	for i := 0; i < h.n; i++ {
+		if h.connected[str(i)] {
+			fmt.Printf("[%s] is connected, commits are\n", str(i))
+			for _, commit := range h.commits[str(i)] {
+				fmt.Printf("Commit: %+v\n", commit)
+			}
+		}
+	}
+}
+
+// CheckCommitted verifies that all connected servers have cmd committed with
+// the same index. It also verifies that all commands *before* cmd in
+// the commit sequence match. For this to work properly, all commands submitted
+// to Raft should be unique positive ints.
+// Returns the number of servers that have this command committed, and its
+// log index.
+// TODO: this check may be too strict. Consider that a server can commit
+// something and crash before notifying the channel. It's a valid commit but
+// this checker will fail because it may not match other servers. This scenario
+// is described in the paper...
+func (h *Harness) CheckCommitted(cmdNum int) (nc int, index int) {
+	cmd := cmdFor(cmdNum)
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.printCommits()
+
+	// Find the length of the commits slice for connected servers.
+	commitsLen := -1
+	for i := 0; i < h.n; i++ {
+		if h.connected[str(i)] {
+			if commitsLen >= 0 {
+				// If this was set already, expect the new length to be the same.
+				if len(h.commits[str(i)]) != commitsLen {
+					h.t.Fatalf("Fatal error: commits[%d] = %v, want commitsLen = %d", i, h.commits[str(i)], commitsLen)
+				}
+			} else {
+				commitsLen = len(h.commits[str(i)])
+			}
+		}
+	}
+
+	// Check consistency of commits from the start and to the command we're asked
+	// about. This loop will return once a command=cmd is found.
+	for c := 0; c < commitsLen; c++ {
+		cmdAtC := cmdFor(-1)
+		for i := 0; i < h.n; i++ {
+			if h.connected[str(i)] {
+				cmdOfN := h.commits[str(i)][c].Command
+				if cmdInt(cmdAtC) >= 0 {
+					if !equalsCmd(cmdOfN, cmdAtC) {
+						h.t.Errorf("got %v, want %v at h.commits[%d][%d]", cmdOfN, cmdAtC, i, c)
+					}
+				} else {
+					cmdAtC = cmdOfN
+				}
+			}
+		}
+		if equalsCmd(cmdAtC, cmd) {
+			// Check consistency of Index.
+			index := -1
+			nc := 0
+			for i := 0; i < h.n; i++ {
+				if h.connected[str(i)] {
+					if index >= 0 && h.commits[str(i)][c].Index != index {
+						h.t.Errorf("got Index=%d, want %d at h.commits[%d][%d]", h.commits[str(i)][c].Index, index, i, c)
+					} else {
+						index = h.commits[str(i)][c].Index
+					}
+					nc++
+				}
+			}
+			return nc, index
+		}
+	}
+
+	// If there's no early return, we haven't found the command we were looking
+	// for.
+	h.t.Errorf("cmd=%v not found in commits", cmd)
+	return -1, -1
+}
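+
+// Editor's illustration (hypothetical values, not from the original diff):
+// after unique commands 42, 55, 81 commit on a fully connected 3-server
+// cluster, one would expect e.g.
+//
+//	nc, i1 := h.CheckCommitted(42) // nc == 3
+//	_, i2 := h.CheckCommitted(55)  // i1 < i2: log order follows submit order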
+
+// CheckCommittedN verifies that cmd was committed by exactly n connected
+// servers.
+func (h *Harness) CheckCommittedN(cmd int, n int) {
+	nc, _ := h.CheckCommitted(cmd)
+	if nc != n {
+		h.t.Errorf("CheckCommittedN got nc=%d, want %d", nc, n)
+	}
+}
+
+// CheckNotCommitted verifies that no command equal to cmd has been committed
+// by any of the active servers yet.
+func (h *Harness) CheckNotCommitted(cmdNum int) {
+	cmd := cmdFor(cmdNum)
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	for i := 0; i < h.n; i++ {
+		if h.connected[str(i)] {
+			for c := 0; c < len(h.commits[str(i)]); c++ {
+				gotCmd := h.commits[str(i)][c].Command
+				if equalsCmd(gotCmd, cmd) {
+					h.t.Errorf("found %v at commits[%d][%d], expected none", cmd, i, c)
+				}
+			}
+		}
+	}
+}
+
+// SubmitToServer submits the command to serverId.
+func (h *Harness) SubmitToServer(serverId string, cmdNum int) bool {
+	cmd := cmdFor(cmdNum)
+	return h.cluster[serverId].cm.Submit(cmd)
+}
+
+func tlog(format string, a ...interface{}) {
+	format = "[TEST] " + format
+	log.Printf(format, a...)
+}
+
+func sleepMs(n int) {
+	time.Sleep(time.Duration(n) * time.Millisecond)
+}
+
+// collectCommits reads channel commitChans[i] and adds all received entries
+// to the corresponding commits[i]. It's blocking and should be run in a
+// separate goroutine. It returns when commitChans[i] is closed.
+func (h *Harness) collectCommits(i int) {
+	for c := range h.commitChans[str(i)] {
+		h.mu.Lock()
+		tlog("collectCommits(%d) got %+v", i, c)
+		h.commits[str(i)] = append(h.commits[str(i)], c)
+		h.mu.Unlock()
+	}
+}
+
+func str(i int) string {
+	return fmt.Sprintf("%d", i)
+}
+
+func equalsCmd(a CommandImpl, b CommandImpl) bool {
+	if a.Command != b.Command {
+		return false
+	}
+
+	if len(a.Args) != len(b.Args) {
+		return false
+	}
+
+	for i := 0; i < len(a.Args); i++ {
+		if a.Args[i] != b.Args[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func cmdFor(n int) CommandImpl {
+	c := CommandImpl{Command: "set", Args: make([]string, 0)}
+	c.Args = append(c.Args, "testkey")
+	c.Args = append(c.Args, str(n))
+
+	return c
+}
+
+func cmdInt(c CommandImpl) int {
+	ns := c.Args[1]
+	v, err := strconv.Atoi(ns)
+	if err != nil {
+		log.Fatalf("can't parse %s as int", ns)
+	}
+	return v
+}
+
+func toInt(str string) int {
+	i, err := strconv.Atoi(str)
+	if err != nil {
+		log.Fatalf("can't convert %s to int", str)
+	}
+	return i
+}
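+
+// Editor's sketch (not part of the original diff): the helpers above encode
+// test commands as "set testkey <n>". A deterministic round-trip check of
+// that encoding, using only helpers defined in this file (it would belong in
+// a _test.go file to run under `go test`):
+func TestCmdEncodingRoundTrip(t *testing.T) {
+	c := cmdFor(42)
+	if got := cmdInt(c); got != 42 {
+		t.Errorf("cmdInt(cmdFor(42)) = %d; want 42", got)
+	}
+	if !equalsCmd(c, cmdFor(42)) {
+		t.Errorf("equalsCmd(cmdFor(42), cmdFor(42)) = false; want true")
+	}
+	if equalsCmd(c, cmdFor(43)) {
+		t.Errorf("equalsCmd(cmdFor(42), cmdFor(43)) = true; want false")
+	}
+}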