Mirror of https://github.com/redis/go-redis.git
Merge branch 'master' into bitop
error.go (6 changed lines)
@@ -22,6 +22,12 @@ var ErrPoolExhausted = pool.ErrPoolExhausted
 // ErrPoolTimeout timed out waiting to get a connection from the connection pool.
 var ErrPoolTimeout = pool.ErrPoolTimeout
 
+// ErrCrossSlot is returned when keys are used in the same Redis command and
+// the keys are not in the same hash slot. This error is returned by Redis
+// Cluster and will be returned by the client when TxPipeline or TxPipelined
+// is used on a ClusterClient with keys in different slots.
+var ErrCrossSlot = proto.RedisError("CROSSSLOT Keys in request don't hash to the same slot")
+
 // HasErrorPrefix checks if the err is a Redis error and the message contains a prefix.
 func HasErrorPrefix(err error, prefix string) bool {
     var rErr Error
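
Note on the new exported error: because ErrCrossSlot is a package-level value, callers can match it directly with errors.Is (or the existing HasErrorPrefix helper). The snippet below is a minimal illustrative sketch, not part of this change; the cluster address is a placeholder, and the keys reuse the A{s}/B{t} pair from the test further down, which hash to different slots.

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/redis/go-redis/v9"
)

func main() {
    // Placeholder address; point this at any node of a Redis Cluster.
    rdb := redis.NewClusterClient(&redis.ClusterOptions{Addrs: []string{":7000"}})
    ctx := context.Background()

    // "A{s}" and "B{t}" hash to different slots, so after this change the
    // transactional pipeline is rejected client-side with ErrCrossSlot.
    _, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
        pipe.Set(ctx, "A{s}", "A_value", 0)
        pipe.Set(ctx, "B{t}", "B_value", 0)
        return nil
    })
    if errors.Is(err, redis.ErrCrossSlot) {
        fmt.Println("keys span multiple hash slots; group them with a common hashtag")
    }
}
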
@@ -325,6 +325,7 @@ func (p *ConnPool) waitTurn(ctx context.Context) error {
 
     start := time.Now()
     timer := timers.Get().(*time.Timer)
+    defer timers.Put(timer)
     timer.Reset(p.cfg.PoolTimeout)
 
     select {
@@ -332,7 +333,6 @@ func (p *ConnPool) waitTurn(ctx context.Context) error {
         if !timer.Stop() {
             <-timer.C
         }
-        timers.Put(timer)
         return ctx.Err()
     case p.queue <- struct{}{}:
         p.waitDurationNs.Add(time.Since(start).Nanoseconds())
@@ -340,10 +340,8 @@ func (p *ConnPool) waitTurn(ctx context.Context) error {
         if !timer.Stop() {
             <-timer.C
         }
-        timers.Put(timer)
         return nil
     case <-timer.C:
-        timers.Put(timer)
         atomic.AddUint32(&p.stats.Timeouts, 1)
         return ErrPoolTimeout
     }
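
The three hunks above replace the per-branch timers.Put(timer) calls with a single defer placed right after the timer is taken from the pool, so every exit path returns the pooled timer exactly once. The standalone sketch below mirrors that structure using only the standard library; waitTurn, queue, and errPoolTimeout here are illustrative stand-ins, not the real pool internals.

package main

import (
    "context"
    "errors"
    "fmt"
    "sync"
    "time"
)

// Pool of reusable timers, created stopped so Reset can arm them on demand.
var timers = sync.Pool{
    New: func() interface{} {
        t := time.NewTimer(time.Hour)
        t.Stop()
        return t
    },
}

var errPoolTimeout = errors.New("pool timeout")

// waitTurn blocks until a slot can be reserved in queue, the context is
// cancelled, or the timeout fires.
func waitTurn(ctx context.Context, queue chan struct{}, timeout time.Duration) error {
    timer := timers.Get().(*time.Timer)
    defer timers.Put(timer) // single return path for the pooled timer
    timer.Reset(timeout)

    select {
    case <-ctx.Done():
        if !timer.Stop() {
            <-timer.C // drain so the pooled timer is clean for reuse
        }
        return ctx.Err()
    case queue <- struct{}{}:
        if !timer.Stop() {
            <-timer.C
        }
        return nil
    case <-timer.C:
        return errPoolTimeout
    }
}

func main() {
    queue := make(chan struct{}, 1)
    fmt.Println(waitTurn(context.Background(), queue, time.Second))          // <nil>
    fmt.Println(waitTurn(context.Background(), queue, 10*time.Millisecond)) // pool timeout
}
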
@@ -340,6 +340,7 @@ type clusterNode struct {
     latency    uint32 // atomic
     generation uint32 // atomic
     failing    uint32 // atomic
+    loaded     uint32 // atomic
 
     // last time the latency measurement was performed for the node, stored in nanoseconds
     // from epoch
@@ -406,6 +407,7 @@ func (n *clusterNode) Latency() time.Duration {
 
 func (n *clusterNode) MarkAsFailing() {
     atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+    atomic.StoreUint32(&n.loaded, 0)
 }
 
 func (n *clusterNode) Failing() bool {
@@ -449,11 +451,21 @@ func (n *clusterNode) SetLastLatencyMeasurement(t time.Time) {
 }
 
 func (n *clusterNode) Loading() bool {
+    loaded := atomic.LoadUint32(&n.loaded)
+    if loaded == 1 {
+        return false
+    }
+
+    // check if the node is loading
     ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
     defer cancel()
 
     err := n.Client.Ping(ctx).Err()
-    return err != nil && isLoadingError(err)
+    loading := err != nil && isLoadingError(err)
+    if !loading {
+        atomic.StoreUint32(&n.loaded, 1)
+    }
+    return loading
 }
 
 //------------------------------------------------------------------------------
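
The new loaded flag makes Loading() a cached probe: once a node has answered PING without a LOADING error, the flag is set and later calls return immediately, and MarkAsFailing clears it so the node is probed again after a failure. A reduced sketch of that state machine, assuming a probe function in place of the real PING/isLoadingError check (node here is not the real clusterNode type):

package main

import (
    "fmt"
    "sync/atomic"
)

// node is a stripped-down stand-in for clusterNode; probe replaces the
// PING + isLoadingError check from the real code.
type node struct {
    loaded uint32 // atomic: 1 once the node has been observed as ready
    probe  func() bool
}

// Loading reports whether the node is still loading its dataset.
// The probe only runs until it succeeds once; the result is then cached.
func (n *node) Loading() bool {
    if atomic.LoadUint32(&n.loaded) == 1 {
        return false
    }
    loading := n.probe()
    if !loading {
        atomic.StoreUint32(&n.loaded, 1) // remember that the node is ready
    }
    return loading
}

// MarkAsFailing clears the cache so the next Loading() call probes again.
func (n *node) MarkAsFailing() {
    atomic.StoreUint32(&n.loaded, 0)
}

func main() {
    probes := 0
    n := &node{probe: func() bool { probes++; return probes < 2 }} // loading once, then ready
    fmt.Println(n.Loading(), n.Loading(), n.Loading(), probes)     // true false false 2
}
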
@@ -1497,6 +1509,10 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
     // Trim multi .. exec.
     cmds = cmds[1 : len(cmds)-1]
 
+    if len(cmds) == 0 {
+        return nil
+    }
+
     state, err := c.state.Get(ctx)
     if err != nil {
         setCmdsErr(cmds, err)
@@ -1504,6 +1520,12 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
     }
 
     cmdsMap := c.mapCmdsBySlot(cmds)
+    // TxPipeline does not support cross slot transaction.
+    if len(cmdsMap) > 1 {
+        setCmdsErr(cmds, ErrCrossSlot)
+        return ErrCrossSlot
+    }
+
     for slot, cmds := range cmdsMap {
         node, err := state.slotMasterNode(slot)
         if err != nil {
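
With the guard above, a transactional pipeline on a ClusterClient only proceeds when every queued key maps to a single hash slot; keys that share a hashtag ({...}) always do, because only the tag is hashed. A hedged usage sketch follows (the address and key names are placeholders, not taken from this change):

package main

import (
    "context"
    "fmt"

    "github.com/redis/go-redis/v9"
)

func main() {
    // Placeholder address for a cluster node.
    rdb := redis.NewClusterClient(&redis.ClusterOptions{Addrs: []string{":7000"}})
    ctx := context.Background()

    // Both keys contain the hashtag {user:42}, so only "user:42" is hashed
    // and the keys land in the same slot; the transaction is allowed.
    cmds, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
        pipe.Set(ctx, "{user:42}:name", "Ada", 0)
        pipe.Incr(ctx, "{user:42}:visits")
        return nil
    })
    fmt.Println(len(cmds), err)
}
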
@@ -462,8 +462,7 @@ var _ = Describe("ClusterClient", func() {
     Describe("pipelining", func() {
         var pipe *redis.Pipeline
 
-        assertPipeline := func() {
-            keys := []string{"A", "B", "C", "D", "E", "F", "G"}
+        assertPipeline := func(keys []string) {
 
             It("follows redirects", func() {
                 if !failover {
@@ -482,13 +481,12 @@ var _ = Describe("ClusterClient", func() {
                 Expect(err).NotTo(HaveOccurred())
                 Expect(cmds).To(HaveLen(14))
 
-                _ = client.ForEachShard(ctx, func(ctx context.Context, node *redis.Client) error {
-                    defer GinkgoRecover()
-                    Eventually(func() int64 {
-                        return node.DBSize(ctx).Val()
-                    }, 30*time.Second).ShouldNot(BeZero())
-                    return nil
-                })
+                // Check that all keys are set.
+                for _, key := range keys {
+                    Eventually(func() string {
+                        return client.Get(ctx, key).Val()
+                    }, 30*time.Second).Should(Equal(key + "_value"))
+                }
 
                 if !failover {
                     for _, key := range keys {
@@ -517,14 +515,14 @@ var _ = Describe("ClusterClient", func() {
             })
 
             It("works with missing keys", func() {
-                pipe.Set(ctx, "A", "A_value", 0)
-                pipe.Set(ctx, "C", "C_value", 0)
+                pipe.Set(ctx, "A{s}", "A_value", 0)
+                pipe.Set(ctx, "C{s}", "C_value", 0)
                 _, err := pipe.Exec(ctx)
                 Expect(err).NotTo(HaveOccurred())
 
-                a := pipe.Get(ctx, "A")
-                b := pipe.Get(ctx, "B")
-                c := pipe.Get(ctx, "C")
+                a := pipe.Get(ctx, "A{s}")
+                b := pipe.Get(ctx, "B{s}")
+                c := pipe.Get(ctx, "C{s}")
                 cmds, err := pipe.Exec(ctx)
                 Expect(err).To(Equal(redis.Nil))
                 Expect(cmds).To(HaveLen(3))
@@ -547,7 +545,8 @@ var _ = Describe("ClusterClient", func() {
 
             AfterEach(func() {})
 
-            assertPipeline()
+            keys := []string{"A", "B", "C", "D", "E", "F", "G"}
+            assertPipeline(keys)
 
             It("doesn't fail node with context.Canceled error", func() {
                 ctx, cancel := context.WithCancel(context.Background())
@@ -590,7 +589,25 @@ var _ = Describe("ClusterClient", func() {
 
             AfterEach(func() {})
 
-            assertPipeline()
+            // TxPipeline doesn't support cross slot commands.
+            // Use hashtag to force all keys to the same slot.
+            keys := []string{"A{s}", "B{s}", "C{s}", "D{s}", "E{s}", "F{s}", "G{s}"}
+            assertPipeline(keys)
+
+            // make sure CrossSlot error is returned
+            It("returns CrossSlot error", func() {
+                pipe.Set(ctx, "A{s}", "A_value", 0)
+                pipe.Set(ctx, "B{t}", "B_value", 0)
+                Expect(hashtag.Slot("A{s}")).NotTo(Equal(hashtag.Slot("B{t}")))
+                _, err := pipe.Exec(ctx)
+                Expect(err).To(MatchError(redis.ErrCrossSlot))
+            })
+
+            // doesn't fail when no commands are queued
+            It("returns no error when there are no commands", func() {
+                _, err := pipe.Exec(ctx)
+                Expect(err).NotTo(HaveOccurred())
+            })
         })
     })
 