Mirror of https://github.com/redis/go-redis.git (synced 2025-07-28 06:42:00 +03:00)
cluster: cleanup tests
1 changed file: cluster_test.go (191 changed lines)

cluster_test.go
@@ -50,7 +50,15 @@ func (s *clusterScenario) addrs() []string {
 func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient {
 	opt.Addrs = s.addrs()
-	return redis.NewClusterClient(opt)
+	client := redis.NewClusterClient(opt)
+	Eventually(func() bool {
+		state, err := client.GetState()
+		if err != nil {
+			return false
+		}
+		return state.IsConsistent()
+	}, 30*time.Second).Should(BeTrue())
+	return client
 }
 
 func startCluster(scenario *clusterScenario) error {
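The helper above now blocks until the freshly created client reports a consistent cluster view, so specs start against a settled cluster. A minimal usage sketch, using only calls that already appear in this diff (redisClusterOptions and the cluster scenario are test-suite helpers):

    opt := redisClusterOptions()         // test-suite options helper
    client := cluster.clusterClient(opt) // waits up to 30s for a consistent cluster state
    defer client.Close()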
@@ -116,45 +124,43 @@ func startCluster(scenario *clusterScenario) error {
 	}
 
 	// Wait until all nodes have consistent info.
+	wanted := []redis.ClusterSlot{{
+		Start: 0,
+		End:   4999,
+		Nodes: []redis.ClusterNode{{
+			Id:   "",
+			Addr: "127.0.0.1:8220",
+		}, {
+			Id:   "",
+			Addr: "127.0.0.1:8223",
+		}},
+	}, {
+		Start: 5000,
+		End:   9999,
+		Nodes: []redis.ClusterNode{{
+			Id:   "",
+			Addr: "127.0.0.1:8221",
+		}, {
+			Id:   "",
+			Addr: "127.0.0.1:8224",
+		}},
+	}, {
+		Start: 10000,
+		End:   16383,
+		Nodes: []redis.ClusterNode{{
+			Id:   "",
+			Addr: "127.0.0.1:8222",
+		}, {
+			Id:   "",
+			Addr: "127.0.0.1:8225",
+		}},
+	}}
 	for _, client := range scenario.clients {
 		err := eventually(func() error {
 			res, err := client.ClusterSlots().Result()
 			if err != nil {
 				return err
 			}
-			wanted := []redis.ClusterSlot{
-				{
-					Start: 0,
-					End:   4999,
-					Nodes: []redis.ClusterNode{{
-						Id:   "",
-						Addr: "127.0.0.1:8220",
-					}, {
-						Id:   "",
-						Addr: "127.0.0.1:8223",
-					}},
-				}, {
-					Start: 5000,
-					End:   9999,
-					Nodes: []redis.ClusterNode{{
-						Id:   "",
-						Addr: "127.0.0.1:8221",
-					}, {
-						Id:   "",
-						Addr: "127.0.0.1:8224",
-					}},
-				}, {
-					Start: 10000,
-					End:   16383,
-					Nodes: []redis.ClusterNode{{
-						Id:   "",
-						Addr: "127.0.0.1:8222",
-					}, {
-						Id:   "",
-						Addr: "127.0.0.1:8225",
-					}},
-				},
-			}
 			return assertSlotsEqual(res, wanted)
 		}, 30*time.Second)
 		if err != nil {
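The loop above relies on a package-level eventually helper from the test suite; its definition is not part of this diff. Purely as an illustration of the assumed shape (poll a function until it stops returning an error or a timeout elapses), a hypothetical stand-in could look like this; the project's real helper may differ:

    // eventually polls fn until it succeeds or the timeout expires.
    // Hypothetical sketch only, not the go-redis test suite's implementation.
    func eventually(fn func() error, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	var err error
    	for {
    		if err = fn(); err == nil {
    			return nil
    		}
    		if time.Now().After(deadline) {
    			return err
    		}
    		time.Sleep(100 * time.Millisecond)
    	}
    }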
@@ -213,6 +219,7 @@ func stopCluster(scenario *clusterScenario) error {
 //------------------------------------------------------------------------------
 
 var _ = Describe("ClusterClient", func() {
+	var failover bool
 	var opt *redis.ClusterOptions
 	var client *redis.ClusterClient
 
@ -233,15 +240,42 @@ var _ = Describe("ClusterClient", func() {
|
||||
Expect(cnt).To(Equal(int64(1)))
|
||||
})
|
||||
|
||||
It("follows redirects", func() {
|
||||
Expect(client.Set("A", "VALUE", 0).Err()).NotTo(HaveOccurred())
|
||||
It("GET follows redirects", func() {
|
||||
err := client.Set("A", "VALUE", 0).Err()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
slot := hashtag.Slot("A")
|
||||
client.SwapSlotNodes(slot)
|
||||
if !failover {
|
||||
Eventually(func() int64 {
|
||||
nodes, err := client.Nodes("A")
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return nodes[1].Client.DBSize().Val()
|
||||
}, 30*time.Second).Should(Equal(int64(1)))
|
||||
|
||||
Eventually(func() string {
|
||||
return client.Get("A").Val()
|
||||
}, 30*time.Second).Should(Equal("VALUE"))
|
||||
Eventually(func() error {
|
||||
return client.SwapNodes("A")
|
||||
}, 30*time.Second).ShouldNot(HaveOccurred())
|
||||
}
|
||||
|
||||
v, err := client.Get("A").Result()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(v).To(Equal("VALUE"))
|
||||
})
|
||||
|
||||
It("SET follows redirects", func() {
|
||||
if !failover {
|
||||
Eventually(func() error {
|
||||
return client.SwapNodes("A")
|
||||
}, 30*time.Second).ShouldNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err := client.Set("A", "VALUE", 0).Err()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
v, err := client.Get("A").Result()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(v).To(Equal("VALUE"))
|
||||
})
|
||||
|
||||
It("distributes keys", func() {
|
||||
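Both specs exercise redirect handling: after SwapNodes moves the slot that "A" hashes to, a plain GET or SET is still expected to succeed because the cluster client follows MOVED/ASK replies internally. A rough sketch of the same behaviour outside the test harness (placeholder addresses; v6-style API as used in this file):

    client := redis.NewClusterClient(&redis.ClusterOptions{
    	Addrs: []string{"127.0.0.1:8220", "127.0.0.1:8221", "127.0.0.1:8222"}, // placeholders
    })
    defer client.Close()

    // Redirects are handled inside the client, so these calls succeed no
    // matter which node currently owns the slot for "A".
    err := client.Set("A", "VALUE", 0).Err()
    // handle err ...
    v, err := client.Get("A").Result() // v == "VALUE" when err == nil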
@ -250,7 +284,8 @@ var _ = Describe("ClusterClient", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, master := range cluster.masters() {
|
||||
client.ForEachMaster(func(master *redis.Client) error {
|
||||
defer GinkgoRecover()
|
||||
Eventually(func() string {
|
||||
return master.Info("keyspace").Val()
|
||||
}, 30*time.Second).Should(Or(
|
||||
@ -258,7 +293,8 @@ var _ = Describe("ClusterClient", func() {
|
||||
ContainSubstring("keys=29"),
|
||||
ContainSubstring("keys=40"),
|
||||
))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
|
||||
It("distributes keys when using EVAL", func() {
|
||||
@ -333,9 +369,12 @@ var _ = Describe("ClusterClient", func() {
|
||||
keys := []string{"A", "B", "C", "D", "E", "F", "G"}
|
||||
|
||||
It("follows redirects", func() {
|
||||
for _, key := range keys {
|
||||
slot := hashtag.Slot(key)
|
||||
client.SwapSlotNodes(slot)
|
||||
if !failover {
|
||||
for _, key := range keys {
|
||||
Eventually(func() error {
|
||||
return client.SwapNodes(key)
|
||||
}, 30*time.Second).ShouldNot(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
for i, key := range keys {
|
||||
@ -354,9 +393,12 @@ var _ = Describe("ClusterClient", func() {
|
||||
return nil
|
||||
})
|
||||
|
||||
for _, key := range keys {
|
||||
slot := hashtag.Slot(key)
|
||||
client.SwapSlotNodes(slot)
|
||||
if !failover {
|
||||
for _, key := range keys {
|
||||
Eventually(func() error {
|
||||
return client.SwapNodes(key)
|
||||
}, 30*time.Second).ShouldNot(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
for _, key := range keys {
|
||||
@ -456,9 +498,10 @@ var _ = Describe("ClusterClient", func() {
|
||||
opt = redisClusterOptions()
|
||||
client = cluster.clusterClient(opt)
|
||||
|
||||
_ = client.ForEachMaster(func(master *redis.Client) error {
|
||||
err := client.ForEachMaster(func(master *redis.Client) error {
|
||||
return master.FlushDB().Err()
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
@ -469,7 +512,8 @@ var _ = Describe("ClusterClient", func() {
|
||||
})
|
||||
|
||||
It("returns pool stats", func() {
|
||||
Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
|
||||
stats := client.PoolStats()
|
||||
Expect(stats).To(BeAssignableToTypeOf(&redis.PoolStats{}))
|
||||
})
|
||||
|
||||
It("removes idle connections", func() {
|
||||
@ -489,8 +533,9 @@ var _ = Describe("ClusterClient", func() {
|
||||
opt.MaxRedirects = -1
|
||||
client := cluster.clusterClient(opt)
|
||||
|
||||
slot := hashtag.Slot("A")
|
||||
client.SwapSlotNodes(slot)
|
||||
Eventually(func() error {
|
||||
return client.SwapNodes("A")
|
||||
}, 30*time.Second).ShouldNot(HaveOccurred())
|
||||
|
||||
err := client.Get("A").Err()
|
||||
Expect(err).To(HaveOccurred())
|
||||
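This spec sets MaxRedirects to -1, which effectively disables redirect retries here, so once the slot for "A" has been moved the GET is expected to surface an error instead of being retried on the new owner; the spec only asserts that some error occurs. A compact restatement of that expectation:

    opt := redisClusterOptions() // test-suite options helper
    opt.MaxRedirects = -1        // do not follow MOVED/ASK replies
    client := cluster.clusterClient(opt)

    // With redirects disabled, a lookup for a key whose slot now lives on
    // another node fails instead of being transparently redirected.
    err := client.Get("A").Err() // expected: err != nil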
@ -627,6 +672,8 @@ var _ = Describe("ClusterClient", func() {
|
||||
|
||||
Describe("ClusterClient failover", func() {
|
||||
BeforeEach(func() {
|
||||
failover = true
|
||||
|
||||
opt = redisClusterOptions()
|
||||
opt.MinRetryBackoff = 250 * time.Millisecond
|
||||
opt.MaxRetryBackoff = time.Second
|
||||
@ -637,21 +684,34 @@ var _ = Describe("ClusterClient", func() {
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_ = client.ForEachSlave(func(slave *redis.Client) error {
|
||||
err = client.ForEachSlave(func(slave *redis.Client) error {
|
||||
defer GinkgoRecover()
|
||||
|
||||
Eventually(func() int64 {
|
||||
return slave.DBSize().Val()
|
||||
}, 30*time.Second).Should(Equal(int64(0)))
|
||||
|
||||
return slave.ClusterFailover().Err()
|
||||
return nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
state, err := client.GetState()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(state.IsConsistent()).To(BeTrue())
|
||||
|
||||
for _, slave := range state.Slaves {
|
||||
err = slave.Client.ClusterFailover().Err()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(func() bool {
|
||||
state, _ := client.LoadState()
|
||||
return state.IsConsistent()
|
||||
}, 30*time.Second).Should(BeTrue())
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
_ = client.ForEachMaster(func(master *redis.Client) error {
|
||||
return master.FlushDB().Err()
|
||||
})
|
||||
failover = false
|
||||
Expect(client.Close()).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
@ -664,23 +724,28 @@ var _ = Describe("ClusterClient", func() {
|
||||
opt.RouteByLatency = true
|
||||
client = cluster.clusterClient(opt)
|
||||
|
||||
_ = client.ForEachMaster(func(master *redis.Client) error {
|
||||
err := client.ForEachMaster(func(master *redis.Client) error {
|
||||
return master.FlushDB().Err()
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_ = client.ForEachSlave(func(slave *redis.Client) error {
|
||||
err = client.ForEachSlave(func(slave *redis.Client) error {
|
||||
Eventually(func() int64 {
|
||||
return client.DBSize().Val()
|
||||
}, 30*time.Second).Should(Equal(int64(0)))
|
||||
return nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
_ = client.ForEachMaster(func(master *redis.Client) error {
|
||||
return master.FlushDB().Err()
|
||||
err := client.ForEachSlave(func(slave *redis.Client) error {
|
||||
return slave.ReadWrite().Err()
|
||||
})
|
||||
Expect(client.Close()).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = client.Close()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
assertClusterClient()
|
||||
|