package raft

//
// this is an outline of the API that raft must expose to
// the service (or tester). see comments below for
// each of these functions for more details.
//
// rf = Make(...)
//   create a new Raft server.
// rf.Start(command interface{}) (index, term, isleader)
//   start agreement on a new log entry
// rf.GetState() (term, isLeader)
//   ask a Raft for its current term, and whether it thinks it is leader
// ApplyMsg
//   each time a new entry is committed to the log, each Raft peer
//   should send an ApplyMsg to the service (or tester)
//   in the same server.
//

import "sync"
import "sync/atomic"
import "../labrpc"
import "fmt"

// import "bytes"
// import "../labgob"
import "time"
import "math/rand"

//
// as each Raft peer becomes aware that successive log entries are
// committed, the peer should send an ApplyMsg to the service (or
// tester) on the same server, via the applyCh passed to Make(). set
// CommandValid to true to indicate that the ApplyMsg contains a newly
// committed log entry.
//
// in Lab 3 you'll want to send other kinds of messages (e.g.,
// snapshots) on the applyCh; at that point you can add fields to
// ApplyMsg, but set CommandValid to false for these other uses.
//
type ApplyMsg struct {
    CommandValid bool
    Command      interface{}
    CommandIndex int
}
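// A minimal applier sketch of the contract described above: it delivers newly
// committed entries on the applyCh given to Make(). This is only a sketch and
// is not wired into Make() here; it assumes rf.log is 0-based while ApplyMsg
// indices are 1-based, and that commitIndex/lastApplied are advanced elsewhere.
func (rf *Raft) applier(applyCh chan ApplyMsg) {
    for !rf.killed() {
        time.Sleep(10 * time.Millisecond)

        // collect newly committed entries while holding the lock
        rf.mu.Lock()
        var msgs []ApplyMsg
        for rf.lastApplied < rf.commitIndex {
            rf.lastApplied++
            msgs = append(msgs, ApplyMsg{
                CommandValid: true,
                Command:      rf.log[rf.lastApplied-1].Command,
                CommandIndex: rf.lastApplied,
            })
        }
        rf.mu.Unlock()

        // send without holding rf.mu, so a slow receiver cannot block
        // the rest of the Raft code
        for _, msg := range msgs {
            applyCh <- msg
        }
    }
}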

type LogEntry struct {
    Term    int
    Command interface{}
}

type serverState struct {
    Leader    bool
    Candidate bool
    Follower  bool
}

//
// A Go object implementing a single Raft peer.
//
type Raft struct {
    mu        sync.Mutex          // Lock to protect shared access to this peer's state
    peers     []*labrpc.ClientEnd // RPC end points of all peers
    persister *Persister          // Object to hold this peer's persisted state
    me        int                 // this peer's index into peers[]
    dead      int32               // set by Kill()

    currentTerm         int
    votedFor            int
    log                 []LogEntry
    serverState         serverState
    commitIndex         int
    lastApplied         int
    lastHeardFromLeader time.Time
    votes               []bool
    numHeardFrom        int
    voteMutex           sync.Mutex
    voteCond            sync.Cond
    leaderSet           bool

    // Your data here (2A, 2B, 2C).
    // Look at the paper's Figure 2 for a description of what
    // state a Raft server must maintain.
}

// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
    rf.mu.Lock()
    defer rf.mu.Unlock()
    isLeader := rf.serverState.Leader
    return rf.currentTerm, isLeader
}

//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
    // Your code here (2C).
    // Example:
    // w := new(bytes.Buffer)
    // e := labgob.NewEncoder(w)
    // e.Encode(rf.xxx)
    // e.Encode(rf.yyy)
    // data := w.Bytes()
    // rf.persister.SaveRaftState(data)
}

//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
    if data == nil || len(data) < 1 { // bootstrap without any state?
        return
    }
    // Your code here (2C).
    // Example:
    // r := bytes.NewBuffer(data)
    // d := labgob.NewDecoder(r)
    // var xxx
    // var yyy
    // if d.Decode(&xxx) != nil ||
    //    d.Decode(&yyy) != nil {
    //   error...
    // } else {
    //   rf.xxx = xxx
    //   rf.yyy = yyy
    // }
}
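// A hedged sketch of what 2C could look like, filling in the example comments
// above with the persistent state from Figure 2 of the paper (currentTerm,
// votedFor, log). It is left commented out because the bytes/labgob imports
// above are still commented out, and the function names are placeholders.
//
// func (rf *Raft) persistState() {
//     w := new(bytes.Buffer)
//     e := labgob.NewEncoder(w)
//     e.Encode(rf.currentTerm)
//     e.Encode(rf.votedFor)
//     e.Encode(rf.log)
//     rf.persister.SaveRaftState(w.Bytes())
// }
//
// func (rf *Raft) readPersistState(data []byte) {
//     if data == nil || len(data) < 1 {
//         return
//     }
//     r := bytes.NewBuffer(data)
//     d := labgob.NewDecoder(r)
//     var currentTerm, votedFor int
//     var log []LogEntry
//     if d.Decode(&currentTerm) != nil ||
//         d.Decode(&votedFor) != nil ||
//         d.Decode(&log) != nil {
//         panic("readPersist: decode failed")
//     }
//     rf.currentTerm = currentTerm
//     rf.votedFor = votedFor
//     rf.log = log
// }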

//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
    // Your data here (2A, 2B).
    Term         int
    CandidateID  int
    LastLogIndex int
    LastLogTerm  int
}

//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
    // Your data here (2A).
    VoteGranted bool
    Term        int
}

//
// example RequestVote RPC handler.
//
// 1. Reply false if term < currentTerm (§5.1)
// 2. If votedFor is null or candidateId, and candidate's log is at
//    least as up-to-date as receiver's log, grant vote (§5.2, §5.4)
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
    // Your code here (2A, 2B).
    rf.mu.Lock()
    defer rf.mu.Unlock()

    fmt.Printf("[%d] received vote request from %d. request term: %d curr term: %d\n", rf.me, args.CandidateID, args.Term, rf.currentTerm)
    if args.Term > rf.currentTerm {
        fmt.Printf("[%d] updating term %d ---> %d\n", rf.me, rf.currentTerm, args.Term)
        rf.currentTerm = args.Term
        rf.serverState.Follower = true
        rf.serverState.Leader = false
        rf.serverState.Candidate = false
        rf.votedFor = -1
    }

    reply.Term = rf.currentTerm

    if args.Term < rf.currentTerm {
        reply.VoteGranted = false
    } else if rf.votedFor == -1 {
        rf.votedFor = args.CandidateID
        reply.VoteGranted = true
    } else if rf.votedFor == args.CandidateID {
        // per rule 2 above: we already voted for this candidate in this term,
        // so a retransmitted request gets the same (granted) answer
        reply.VoteGranted = true
    } else {
        reply.VoteGranted = false
    }
    fmt.Printf("[%d] sent vote response to %d. term: %d voteGranted: %t\n", rf.me, args.CandidateID, reply.Term, reply.VoteGranted)
}

//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
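// For instance, the calling pattern used by sendRequestVote() further down
// boils down to this sketch (args and reply are both passed by address):
//
//   args := RequestVoteArgs{Term: rf.currentTerm, CandidateID: rf.me}
//   reply := RequestVoteReply{}
//   ok := rf.peers[server].Call("Raft.RequestVote", &args, &reply)
//   if !ok {
//       // the peer was unreachable, or the request/reply was lost
//   }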

// startAgreement is a placeholder for log replication (Lab 2B): when the
// leader accepts a command via Start(), this is where it would replicate
// the new entry to the other peers.
func (rf *Raft) startAgreement(command interface{}) {
}

//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
    rf.mu.Lock()
    defer rf.mu.Unlock()
    index := len(rf.log)
    term := rf.currentTerm
    if rf.serverState.Leader {
        go rf.startAgreement(command)
    }
    return index, term, rf.serverState.Leader
}
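// A sketch of how a service would typically consume Start() (hypothetical
// caller, not part of this file):
//
//   index, term, isLeader := rf.Start(op)
//   if !isLeader {
//       // retry against another peer; only the leader accepts new commands
//   }
//   // otherwise, wait for an ApplyMsg whose CommandIndex equals index
//   // (and re-check the term) to learn whether the command committed.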

//
// the tester calls Kill() when a Raft instance won't
// be needed again. for your convenience, we supply
// code to set rf.dead (without needing a lock),
// and a killed() method to test rf.dead in
// long-running loops. you can also add your own
// code to Kill(). you're not required to do anything
// about this, but it may be convenient (for example)
// to suppress debug output from a Kill()ed instance.
//
func (rf *Raft) Kill() {
    atomic.StoreInt32(&rf.dead, 1)
    // Your code here, if desired.
}

func (rf *Raft) killed() bool {
    z := atomic.LoadInt32(&rf.dead)
    return z == 1
}
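// The long-running-loop shape the comment above has in mind looks roughly
// like this (sketch; heartBeats() and electionTimeoutHandler() below use an
// equivalent check-and-return):
//
//   for !rf.killed() {
//       // periodic work
//       time.Sleep(10 * time.Millisecond)
//   }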

//
// the service or tester wants to create a Raft server. the ports
// of all the Raft servers (including this one) are in peers[]. this
// server's port is peers[me]. all the servers' peers[] arrays
// have the same order. persister is a place for this server to
// save its persistent state, and also initially holds the most
// recent saved state, if any. applyCh is a channel on which the
// tester or service expects Raft to send ApplyMsg messages.
// Make() must return quickly, so it should start goroutines
// for any long-running work.
//

// func (rf *Raft) monitorElection() {
//     fmt.Printf("monitoring election %d \n", rf.me)
//     for {
//         // fmt.Printf("spinning %d \n", rf.me)
//         time.Sleep(10 * time.Millisecond)
//
//         if rf.serverState.Candidate && rf.numHeardFrom > int((0.5 * float64(len(rf.peers)))) {
//             voteCount := 0
//             for i := 0; i < len(rf.votes); i++ {
//                 if rf.votes[i] {
//                     voteCount += 1
//                 }
//             }
//             fmt.Printf("peer: %d, votes: %d/%d\n", rf.me, voteCount, len(rf.peers))
//             if voteCount > int((0.5 * float64(len(rf.peers)))) {
//                 rf.mu.Lock()
//                 fmt.Printf("%d became leader\n", rf.me)
//                 rf.serverState.Leader = true
//                 rf.serverState.Candidate = false
//                 rf.serverState.Follower = false
//                 rf.votes = make([]bool, len(rf.peers))
//                 rf.numHeardFrom = 0
//                 rf.mu.Unlock()
//                 go rf.heartBeats()
//                 return
//             }
//         } else if rf.serverState.Leader || rf.serverState.Follower {
//             fmt.Printf("canceling election %d\n", rf.me)
//             rf.mu.Lock()
//             rf.votes = make([]bool, len(rf.peers))
//             rf.numHeardFrom = 0
//             rf.mu.Unlock()
//             return
//         }
//     }
// }

// func (rf *Raft) startElection() {
//     rf.mu.Lock()
//     rf.currentTerm += 1
//     rf.serverState.Candidate = true
//     rf.serverState.Follower = false
//     rf.votes[rf.me] = true
//
//     args := RequestVoteArgs{}
//     args.Term = rf.currentTerm
//     args.CandidateID = rf.me
//
//     // args.LastLogIndex = rf.commitIndex
//     // args.LastLogTerm = rf.log[rf.commitIndex].Term
//     // args.LastLogTerm = rf.currentTerm // TODO FIX THIS !!!! ITS WRONG
//
//     rf.mu.Unlock()
//     rf.mu.Lock()
//     rf.mu.Unlock()
//     replies := make([]RequestVoteReply, len(rf.peers))
//     for i := 0; i < len(rf.peers); i++ {
//         fmt.Printf("sent vote request to %d\n", i)
//         go rf.sendRequestVote(i, &args, &replies[i]) // TODO make sure pointer right
//     }
//     fmt.Println("sent vote requests")
//
//     go rf.monitorElection()
//
//     var wg sync.WaitGroup
//     for i := 0; i < 5; i++ {
//         wg.Add(1)
//         go func(x int) {
//             sendRPC(x)
//             wg.Done()
//         }(i)
//     }
//     wg.Wait()
//
//     count := 0
//     finished := 0
//     var mu sync.Mutex
//     cond := sync.NewCond(&mu)
//
//     for i := 0; i < 10; i++ {
//         go func() {
//             vote := sendRequestVote()
//             mu.Lock()
//             defer mu.Unlock()
//             if vote {
//                 count++
//             }
//             finished++
//             cond.Broadcast()
//         }()
//     }
//
//     mu.Lock()
//     for count < 5 && finished != 10 {
//         cond.Wait()
//     }
//     if count >= 5 {
//         println("received 5+ votes!")
//     } else {
//         println("lost")
//     }
//     mu.Unlock()
// }

type AppendEntriesRPCArgs struct {
    LeaderID int
    Term     int
}

type AppendEntriesRPCReply struct {
    Term int
}

func (rf *Raft) AppendEntries(args *AppendEntriesRPCArgs, reply *AppendEntriesRPCReply) {
    rf.mu.Lock()
    defer rf.mu.Unlock()
    if args.Term >= rf.currentTerm {
        fmt.Printf("[%d] received heartbeat from correct/new term from leader %d. %d's term: %d leader's term: %d \n", rf.me, args.LeaderID, rf.me, rf.currentTerm, args.Term)
        rf.lastHeardFromLeader = time.Now()
        if args.Term > rf.currentTerm {
            // only forget our vote when moving to a newer term; clearing
            // votedFor within the current term would let this peer vote twice
            rf.votedFor = -1
        }
        rf.currentTerm = args.Term
        rf.serverState.Candidate = false
        rf.serverState.Leader = false
        rf.serverState.Follower = true
    }
    reply.Term = rf.currentTerm
    fmt.Printf("[%d] received heartbeat from leader %d. %d's term: %d leader's term: %d \n", rf.me, args.LeaderID, rf.me, rf.currentTerm, args.Term)
}

func (rf *Raft) heartBeats() {
    for {
        if rf.killed() {
            return // stop the loop once this peer has been killed
        }
        time.Sleep(120 * time.Millisecond)
        rf.mu.Lock()
        // fmt.Printf("[%d] acquired lock --- heartbeats\n", rf.me)
        curTerm := rf.currentTerm
        if rf.serverState.Leader {
            args := AppendEntriesRPCArgs{}
            args.LeaderID = rf.me
            args.Term = curTerm
            for i := 0; i < len(rf.peers); i++ {
                if i == rf.me {
                    continue
                }
                fmt.Printf("[%d] send heartbeats to %d, term: %d\n", rf.me, i, curTerm)
                go func(x int) {
                    reply := AppendEntriesRPCReply{}
                    res := rf.peers[x].Call("Raft.AppendEntries", &args, &reply)
                    if !res {
                        return
                    }
                    rf.mu.Lock()
                    if reply.Term > rf.currentTerm {
                        // use x (the captured copy), not the loop variable i
                        fmt.Printf("[%d] got heartbeat reply from %d, resetting term to %d & becoming a follower\n", rf.me, x, reply.Term)

                        rf.currentTerm = reply.Term
                        rf.serverState.Follower = true
                        rf.serverState.Candidate = false
                        rf.serverState.Leader = false
                        rf.votedFor = -1
                    }
                    rf.mu.Unlock()
                }(i)
            }
        }
        rf.mu.Unlock()
        // fmt.Printf("[%d] released lock --- heartbeats\n", rf.me)
    }
}

func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
    fmt.Printf("[%d] sending vote request to %d\n", rf.me, server)
    res := rf.peers[server].Call("Raft.RequestVote", args, reply)

    if !res {
        return false
    }

    fmt.Printf("[%d] received vote response from %d\n", rf.me, server)
    rf.mu.Lock()
    fmt.Printf("[%d] acquired lock --- sendRequestVote\n", rf.me)

    if rf.currentTerm == reply.Term && reply.VoteGranted {
        rf.mu.Unlock()
        fmt.Printf("[%d] released lock --- sendRequestVote\n", rf.me)
        return true
    } else if !reply.VoteGranted && rf.currentTerm == reply.Term {
        rf.mu.Unlock()
        fmt.Printf("[%d] released lock --- sendRequestVote\n", rf.me)
        return false
    } else if reply.Term > rf.currentTerm { // cancel election if the term has increased
        rf.currentTerm = reply.Term
        rf.serverState.Candidate = false
        rf.serverState.Leader = false
        rf.serverState.Follower = true
        rf.votedFor = -1
        rf.mu.Unlock()
        fmt.Printf("[%d] released lock --- sendRequestVote\n", rf.me)
        return false
    } else if reply.Term < args.Term {
        // a reply should never carry a term lower than the request's term;
        // treat this as a bug rather than as a vote
        rf.mu.Unlock()
        fmt.Printf("[%d] released lock --- sendRequestVote\n", rf.me)
        panic("sendRequestVote: reply term is lower than the request term")
    } else {
        fmt.Printf("args: %+v\n", args)
        fmt.Printf("rf: %+v\n", rf)
        rf.mu.Unlock()
        panic("sendRequestVote: unexpected reply state in vote aggregation")
    }
}

func (rf *Raft) startElection() {
    rf.mu.Lock()
    fmt.Printf("[%d] acquired lock --- startElection\n", rf.me)
    rf.currentTerm += 1
    fmt.Printf("[%d] starting election! new term: %d \n", rf.me, rf.currentTerm)
    ourTerm := rf.currentTerm
    rf.serverState.Candidate = true
    rf.serverState.Follower = false
    rf.serverState.Leader = false
    rf.votedFor = rf.me

    args := RequestVoteArgs{}
    args.Term = rf.currentTerm
    args.CandidateID = rf.me

    rf.mu.Unlock()
    fmt.Printf("[%d] released lock --- startElection\n", rf.me)
    replies := make([]RequestVoteReply, len(rf.peers))

    count := 1 // we always vote for ourselves
    finished := 0
    cond := sync.NewCond(&rf.voteMutex)
    fmt.Printf("[%d] about to send out votes\n", rf.me)
    for i := 0; i < len(rf.peers); i++ {
        if i == rf.me {
            continue
        }
        go func(x int) {
            vote := rf.sendRequestVote(x, &args, &replies[x])
            rf.voteMutex.Lock()
            if vote {
                count++
            }
            finished++
            cond.Broadcast()
            rf.voteMutex.Unlock()
        }(i)
    }
    fmt.Printf("[%d] waiting for vote results pt1\n", rf.me)

    rf.voteMutex.Lock()
    fmt.Printf("[%d] waiting for vote results LOCKED VOTEMUTEX\n", rf.me)

    for count <= (len(rf.peers)/2) && finished != len(rf.peers) && rf.serverState.Candidate {
        cond.Wait()
    }
    wonMajority := count > (len(rf.peers) / 2)
    rf.voteMutex.Unlock()

    fmt.Printf("[%d] waiting for vote results pt2\n", rf.me)
    if wonMajority {
        rf.mu.Lock()
        fmt.Printf("[%d] acquired lock --- election pt 2\n", rf.me)

        if rf.currentTerm != ourTerm || !rf.serverState.Candidate {
            // a newer term arrived (or we were demoted) while collecting votes;
            // release the lock before abandoning the election
            rf.mu.Unlock()
            return
        }
        fmt.Printf("[%d] became leader in term %d \n", rf.me, rf.currentTerm)
        rf.serverState.Leader = true
        rf.serverState.Candidate = false
        rf.serverState.Follower = false
        rf.mu.Unlock()
        fmt.Printf("[%d] released lock --- election part 2 \n", rf.me)
        // go rf.heartBeats()
        return
    } else {
        rf.mu.Lock()
        fmt.Printf("[%d] acquired lock --- election pt 2\n", rf.me)
        rf.serverState.Leader = false
        rf.serverState.Candidate = false
        rf.serverState.Follower = true
        rf.votedFor = -1
        rf.mu.Unlock()
        fmt.Printf("[%d] released lock --- election part 2 \n", rf.me)
    }

    fmt.Printf("[%d] failed election\n", rf.me)
}

func (rf *Raft) electionTimeoutHandler() {
    for {
        // see if we've heard from the leader within the past X ms.
        // if we haven't, start a new election.
        randInterval := rand.Int31n(500-300) + 300 // random timeout in [300, 500) ms
        time.Sleep(time.Duration(randInterval) * time.Millisecond)

        if rf.killed() {
            return // stop the loop once this peer has been killed
        }

        rf.mu.Lock()
        if rf.serverState.Leader { // don't start elections if you're the leader
            // if rf.serverState.Leader {
            //     fmt.Printf("[%d] currently leader, not starting election\n", rf.me)
            // } else {
            //     fmt.Printf("[%d] currently candidate, not starting election\n", rf.me)
            // }

            rf.mu.Unlock()
            continue
        } else if rf.serverState.Follower || rf.serverState.Candidate {
            dur := time.Since(rf.lastHeardFromLeader)
            fmt.Printf("[%d] time since last heard from leader: %s\n", rf.me, dur.String())
            if dur > 1000*time.Millisecond {
                // start election, send out
                fmt.Printf("[%d] starting election!\n", rf.me)
                rf.lastHeardFromLeader = time.Now()
                go rf.startElection()
            }
            rf.mu.Unlock()
            fmt.Printf("[%d] released lock --- timeouthandler\n", rf.me)
        }
        // else {
        //     panic("unclear what the server state is?")
        // }
    }
}

// Modify Make() to create a background goroutine that will kick off
// leader election periodically by sending out RequestVote RPCs when it
// hasn't heard from another peer for a while. This way a peer will learn
// who is the leader, if there is already a leader, or become the leader itself.

func Make(peers []*labrpc.ClientEnd, me int,
    persister *Persister, applyCh chan ApplyMsg) *Raft {
    rf := &Raft{}
    rf.peers = peers
    rf.persister = persister
    rf.me = me
    rf.serverState.Follower = true
    rf.votedFor = -1
    rf.serverState.Leader = false
    rf.serverState.Candidate = false

    rf.log = make([]LogEntry, 0)
    rf.currentTerm = 0

    // Your initialization code here (2A, 2B, 2C).
    fmt.Printf("[%d] creating server \n", me)

    // initialize from state persisted before a crash, before the
    // background goroutines start using that state
    rf.readPersist(persister.ReadRaftState())

    go rf.heartBeats()
    go rf.electionTimeoutHandler()

    return rf
}