Mirror of https://github.com/redis/go-redis.git

Commit: Merge branch 'v8'

Changed file: cluster.go (196 changed lines)
@@ -13,10 +13,10 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/go-redis/redis/v7/internal"
-	"github.com/go-redis/redis/v7/internal/hashtag"
-	"github.com/go-redis/redis/v7/internal/pool"
-	"github.com/go-redis/redis/v7/internal/proto"
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
 )
 
 var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
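The only caller-visible effect of this hunk is the new module path. A minimal, hypothetical caller after the upgrade (addresses and key names are placeholders, not part of this diff) could look like the sketch below; later sketches on this page assume the same `redis "github.com/go-redis/redis/v8"` import plus the relevant standard-library packages.

package main

import (
	"context"

	redis "github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// Single-node client shown only to illustrate the /v8 import path.
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	_ = rdb.Set(ctx, "key", "value", 0).Err()
}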
@@ -200,7 +200,7 @@ func (n *clusterNode) updateLatency() {
 	var latency uint32
 	for i := 0; i < probes; i++ {
 		start := time.Now()
-		n.Client.Ping()
+		n.Client.Ping(context.TODO())
 		probe := uint32(time.Since(start) / time.Microsecond)
 		latency = (latency + probe) / 2
 	}
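In v8 every command takes a context as its first argument; the latency probe above uses context.TODO() because no caller context is available there. Application code would normally pass a bounded context instead, roughly as in this usage sketch (assuming a *redis.ClusterClient named rdb):

func pingCluster(rdb *redis.ClusterClient) error {
	// Bound the health check rather than relying on context.TODO().
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return rdb.Ping(ctx).Err()
}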
@@ -597,20 +597,20 @@ func (c *clusterState) slotNodes(slot int) []*clusterNode {
 //------------------------------------------------------------------------------
 
 type clusterStateHolder struct {
-	load func() (*clusterState, error)
+	load func(ctx context.Context) (*clusterState, error)
 
 	state     atomic.Value
 	reloading uint32 // atomic
 }
 
-func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder {
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
 	return &clusterStateHolder{
 		load: fn,
 	}
 }
 
-func (c *clusterStateHolder) Reload() (*clusterState, error) {
-	state, err := c.load()
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+	state, err := c.load(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -618,14 +618,14 @@ func (c *clusterStateHolder) Reload() (*clusterState, error) {
 	return state, nil
 }
 
-func (c *clusterStateHolder) LazyReload() {
+func (c *clusterStateHolder) LazyReload(ctx context.Context) {
 	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
 		return
 	}
 	go func() {
 		defer atomic.StoreUint32(&c.reloading, 0)
 
-		_, err := c.Reload()
+		_, err := c.Reload(ctx)
 		if err != nil {
 			return
 		}
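LazyReload keeps the existing guard: a compare-and-swap on the reloading flag so that at most one background reload runs at a time, now carrying the caller's context into the goroutine. The same pattern in isolation (hypothetical names, not the library's internals):

type reloader struct {
	reloading uint32 // 1 while a background reload is in flight
}

func (r *reloader) lazyReload(ctx context.Context, load func(context.Context) error) {
	// Only the caller that wins the CAS starts a reload; others return immediately.
	if !atomic.CompareAndSwapUint32(&r.reloading, 0, 1) {
		return
	}
	go func() {
		defer atomic.StoreUint32(&r.reloading, 0)
		_ = load(ctx)
	}()
}

Note that the goroutine may outlive the request that triggered it, so the context passed in effectively governs the background reload as well.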
@@ -633,24 +633,24 @@ func (c *clusterStateHolder) LazyReload() {
 	}()
 }
 
-func (c *clusterStateHolder) Get() (*clusterState, error) {
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
 	v := c.state.Load()
 	if v != nil {
 		state := v.(*clusterState)
 		if time.Since(state.createdAt) > time.Minute {
-			c.LazyReload()
+			c.LazyReload(ctx)
 		}
 		return state, nil
 	}
-	return c.Reload()
+	return c.Reload(ctx)
 }
 
-func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
-	state, err := c.Reload()
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+	state, err := c.Reload(ctx)
 	if err == nil {
 		return state, nil
 	}
-	return c.Get()
+	return c.Get(ctx)
 }
 
 //------------------------------------------------------------------------------
@@ -717,8 +717,8 @@ func (c *ClusterClient) Options() *ClusterOptions {
 
 // ReloadState reloads cluster state. If available it calls ClusterSlots func
 // to get cluster slots information.
-func (c *ClusterClient) ReloadState() error {
-	_, err := c.state.Reload()
+func (c *ClusterClient) ReloadState(ctx context.Context) error {
+	_, err := c.state.Reload(ctx)
 	return err
 }
 
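Callers that force a topology refresh now pass a context as well; a sketch assuming the signature shown in this hunk:

func refreshTopology(ctx context.Context, rdb *redis.ClusterClient) error {
	// v7: rdb.ReloadState(); v8 per this diff: the context is explicit.
	return rdb.ReloadState(ctx)
}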
@@ -731,21 +731,13 @@ func (c *ClusterClient) Close() error {
 }
 
 // Do creates a Cmd from the args and processes the cmd.
-func (c *ClusterClient) Do(args ...interface{}) *Cmd {
-	return c.DoContext(c.ctx, args...)
-}
-
-func (c *ClusterClient) DoContext(ctx context.Context, args ...interface{}) *Cmd {
-	cmd := NewCmd(args...)
-	_ = c.ProcessContext(ctx, cmd)
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
 	return cmd
 }
 
-func (c *ClusterClient) Process(cmd Cmder) error {
-	return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *ClusterClient) ProcessContext(ctx context.Context, cmd Cmder) error {
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
 	return c.hooks.process(ctx, cmd, c.process)
 }
 
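Do and Process absorb their *Context variants: the context becomes the first argument and DoContext/ProcessContext are removed. On the caller side that looks roughly like this sketch (key name is a placeholder):

func getViaDo(ctx context.Context, rdb *redis.ClusterClient) (interface{}, error) {
	// Arbitrary command via Do; v7's rdb.Do("get", "key") becomes:
	return rdb.Do(ctx, "get", "key").Result()
}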
@@ -774,7 +766,7 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
 
 		if node == nil {
 			var err error
-			node, err = c.cmdNode(cmdInfo, slot)
+			node, err = c.cmdNode(ctx, cmdInfo, slot)
 			if err != nil {
 				return err
 			}
@@ -782,13 +774,13 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
 
 		if ask {
 			pipe := node.Client.Pipeline()
-			_ = pipe.Process(NewCmd("asking"))
-			_ = pipe.Process(cmd)
-			_, lastErr = pipe.ExecContext(ctx)
+			_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+			_ = pipe.Process(ctx, cmd)
+			_, lastErr = pipe.Exec(ctx)
 			_ = pipe.Close()
 			ask = false
 		} else {
-			lastErr = node.Client.ProcessContext(ctx, cmd)
+			lastErr = node.Client.Process(ctx, cmd)
 		}
 
 		// If there is no error - we are done.
@@ -796,7 +788,7 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
 			return nil
 		}
 		if lastErr != Nil {
-			c.state.LazyReload()
+			c.state.LazyReload(ctx)
 		}
 		if lastErr == pool.ErrClosed || isReadOnlyError(lastErr) {
 			node = nil
@@ -841,8 +833,11 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
 
 // ForEachMaster concurrently calls the fn on each master node in the cluster.
 // It returns the first error if any.
-func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
-	state, err := c.state.ReloadOrGet()
+func (c *ClusterClient) ForEachMaster(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
 	if err != nil {
 		return err
 	}
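ForEachMaster (and the sibling ForEachSlave/ForEachNode below) now takes a context and passes it to the callback. A usage sketch under the new signature:

func flushMasters(ctx context.Context, rdb *redis.ClusterClient) error {
	// The callback receives the context along with each master's client.
	return rdb.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
		return master.FlushDB(ctx).Err()
	})
}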
@@ -854,7 +849,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 		wg.Add(1)
 		go func(node *clusterNode) {
 			defer wg.Done()
-			err := fn(node.Client)
+			err := fn(ctx, node.Client)
 			if err != nil {
 				select {
 				case errCh <- err:
@@ -876,8 +871,11 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 
 // ForEachSlave concurrently calls the fn on each slave node in the cluster.
 // It returns the first error if any.
-func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
-	state, err := c.state.ReloadOrGet()
+func (c *ClusterClient) ForEachSlave(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
 	if err != nil {
 		return err
 	}
@@ -889,7 +887,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 		wg.Add(1)
 		go func(node *clusterNode) {
 			defer wg.Done()
-			err := fn(node.Client)
+			err := fn(ctx, node.Client)
 			if err != nil {
 				select {
 				case errCh <- err:
@@ -911,8 +909,11 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 
 // ForEachNode concurrently calls the fn on each known node in the cluster.
 // It returns the first error if any.
-func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
-	state, err := c.state.ReloadOrGet()
+func (c *ClusterClient) ForEachNode(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
 	if err != nil {
 		return err
 	}
@@ -922,7 +923,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 
 	worker := func(node *clusterNode) {
 		defer wg.Done()
-		err := fn(node.Client)
+		err := fn(ctx, node.Client)
 		if err != nil {
 			select {
 			case errCh <- err:
@@ -954,7 +955,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 func (c *ClusterClient) PoolStats() *PoolStats {
 	var acc PoolStats
 
-	state, _ := c.state.Get()
+	state, _ := c.state.Get(context.TODO())
 	if state == nil {
 		return &acc
 	}
@@ -984,7 +985,7 @@ func (c *ClusterClient) PoolStats() *PoolStats {
 	return &acc
 }
 
-func (c *ClusterClient) loadState() (*clusterState, error) {
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
 	if c.opt.ClusterSlots != nil {
 		slots, err := c.opt.ClusterSlots()
 		if err != nil {
@@ -1008,7 +1009,7 @@ func (c *ClusterClient) loadState() (*clusterState, error) {
 			continue
 		}
 
-		slots, err := node.Client.ClusterSlots().Result()
+		slots, err := node.Client.ClusterSlots(ctx).Result()
 		if err != nil {
 			if firstErr == nil {
 				firstErr = err
@@ -1051,8 +1052,8 @@ func (c *ClusterClient) Pipeline() Pipeliner {
 	return &pipe
 }
 
-func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
-	return c.Pipeline().Pipelined(fn)
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
 }
 
 func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
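Pipelined gains the context parameter as well, and each queued command takes it too. A minimal usage sketch (key names are placeholders):

func pipelinedIncrs(ctx context.Context, rdb *redis.ClusterClient) error {
	// Commands are buffered and flushed per node when the callback returns.
	_, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "counter:a")
		pipe.Incr(ctx, "counter:b")
		return nil
	})
	return err
}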
@@ -1061,7 +1062,7 @@ func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error
 
 func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
 	cmdsMap := newCmdsMap()
-	err := c.mapCmdsByNode(cmdsMap, cmds)
+	err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
 	if err != nil {
 		setCmdsErr(cmds, err)
 		return err
@@ -1088,7 +1089,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
 				return
 			}
 			if attempt < c.opt.MaxRedirects {
-				if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+				if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
 					setCmdsErr(cmds, err)
 				}
 			} else {
@@ -1107,8 +1108,8 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
 	return cmdsFirstErr(cmds)
 }
 
-func (c *ClusterClient) mapCmdsByNode(cmdsMap *cmdsMap, cmds []Cmder) error {
-	state, err := c.state.Get()
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+	state, err := c.state.Get(ctx)
 	if err != nil {
 		return err
 	}
@@ -1159,21 +1160,25 @@ func (c *ClusterClient) _processPipelineNode(
 			}
 
 			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
-				return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+				return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
 			})
 		})
 	})
 }
 
 func (c *ClusterClient) pipelineReadCmds(
-	node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+	ctx context.Context,
+	node *clusterNode,
+	rd *proto.Reader,
+	cmds []Cmder,
+	failedCmds *cmdsMap,
 ) error {
 	for _, cmd := range cmds {
 		err := cmd.readReply(rd)
 		if err == nil {
 			continue
 		}
-		if c.checkMovedErr(cmd, err, failedCmds) {
+		if c.checkMovedErr(ctx, cmd, err, failedCmds) {
 			continue
 		}
 
@@ -1190,7 +1195,7 @@ func (c *ClusterClient) pipelineReadCmds(
 }
 
 func (c *ClusterClient) checkMovedErr(
-	cmd Cmder, err error, failedCmds *cmdsMap,
+	ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
 ) bool {
 	moved, ask, addr := isMovedError(err)
 	if !moved && !ask {
@@ -1203,13 +1208,13 @@ func (c *ClusterClient) checkMovedErr(
 	}
 
 	if moved {
-		c.state.LazyReload()
+		c.state.LazyReload(ctx)
 		failedCmds.Add(node, cmd)
 		return true
 	}
 
 	if ask {
-		failedCmds.Add(node, NewCmd("asking"), cmd)
+		failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
 		return true
 	}
 
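For background: MOVED means the slot has permanently migrated, so the cluster state is lazily reloaded, while ASK is a one-off redirect that must be preceded by an ASKING command, which is why the failed command is re-queued behind NewCmd(ctx, "asking"). A generic sketch of recognizing these replies (hypothetical helper, not the library's isMovedError):

func parseRedirect(err error) (moved, ask bool, addr string) {
	if err == nil {
		return false, false, ""
	}
	// Redirection errors look like "MOVED 3999 127.0.0.1:6381" or "ASK 3999 127.0.0.1:6381".
	parts := strings.Fields(err.Error())
	if len(parts) == 3 && (parts[0] == "MOVED" || parts[0] == "ASK") {
		return parts[0] == "MOVED", parts[0] == "ASK", parts[2]
	}
	return false, false, ""
}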
@@ -1226,8 +1231,8 @@ func (c *ClusterClient) TxPipeline() Pipeliner {
 	return &pipe
 }
 
-func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
-	return c.TxPipeline().Pipelined(fn)
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
 }
 
 func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
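TxPipelined mirrors Pipelined: the queued commands are wrapped in MULTI/EXEC on the node that owns the keys' slot, so keys are typically pinned with a hash tag. A usage sketch (key name is a placeholder):

func txIncr(ctx context.Context, rdb *redis.ClusterClient) error {
	// The {user:1} hash tag keeps the key in one fixed slot.
	_, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "{user:1}:visits")
		pipe.Expire(ctx, "{user:1}:visits", time.Hour)
		return nil
	})
	return err
}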
@@ -1235,7 +1240,7 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) err
 }
 
 func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
-	state, err := c.state.Get()
+	state, err := c.state.Get(ctx)
 	if err != nil {
 		setCmdsErr(cmds, err)
 		return err
@@ -1271,7 +1276,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
 				return
 			}
 			if attempt < c.opt.MaxRedirects {
-				if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+				if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
 					setCmdsErr(cmds, err)
 				}
 			} else {
@@ -1317,11 +1322,11 @@ func (c *ClusterClient) _processTxPipelineNode(
 			// Trim multi and exec.
 			cmds = cmds[1 : len(cmds)-1]
 
-			err := c.txPipelineReadQueued(rd, statusCmd, cmds, failedCmds)
+			err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
 			if err != nil {
 				moved, ask, addr := isMovedError(err)
 				if moved || ask {
-					return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
+					return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
 				}
 				return err
 			}
@@ -1333,7 +1338,11 @@ func (c *ClusterClient) _processTxPipelineNode(
 }
 
 func (c *ClusterClient) txPipelineReadQueued(
-	rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap,
+	ctx context.Context,
+	rd *proto.Reader,
+	statusCmd *StatusCmd,
+	cmds []Cmder,
+	failedCmds *cmdsMap,
 ) error {
 	// Parse queued replies.
 	if err := statusCmd.readReply(rd); err != nil {
@@ -1342,7 +1351,7 @@ func (c *ClusterClient) txPipelineReadQueued(
 
 	for _, cmd := range cmds {
 		err := statusCmd.readReply(rd)
-		if err == nil || c.checkMovedErr(cmd, err, failedCmds) || isRedisError(err) {
+		if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
 			continue
 		}
 		return err
@@ -1370,7 +1379,10 @@ func (c *ClusterClient) txPipelineReadQueued(
 }
 
 func (c *ClusterClient) cmdsMoved(
-	cmds []Cmder, moved, ask bool, addr string, failedCmds *cmdsMap,
+	ctx context.Context, cmds []Cmder,
+	moved, ask bool,
+	addr string,
+	failedCmds *cmdsMap,
 ) error {
 	node, err := c.nodes.Get(addr)
 	if err != nil {
@@ -1378,7 +1390,7 @@ func (c *ClusterClient) cmdsMoved(
 	}
 
 	if moved {
-		c.state.LazyReload()
+		c.state.LazyReload(ctx)
 		for _, cmd := range cmds {
 			failedCmds.Add(node, cmd)
 		}
@@ -1387,7 +1399,7 @@ func (c *ClusterClient) cmdsMoved(
 
 	if ask {
 		for _, cmd := range cmds {
-			failedCmds.Add(node, NewCmd("asking"), cmd)
+			failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
 		}
 		return nil
 	}
@@ -1395,11 +1407,7 @@ func (c *ClusterClient) cmdsMoved(
 	return nil
 }
 
-func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
-	return c.WatchContext(c.ctx, fn, keys...)
-}
-
-func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
 	if len(keys) == 0 {
 		return fmt.Errorf("redis: Watch requires at least one key")
 	}
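Watch drops the WatchContext variant and takes the context directly; the usual optimistic-locking loop now threads ctx through the transaction as well. A usage sketch (key name is a placeholder):

func incrWithWatch(ctx context.Context, rdb *redis.ClusterClient, key string) error {
	// WATCH the key, read it, then write inside MULTI/EXEC; EXEC fails if the key changed.
	return rdb.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, n+1, 0)
			return nil
		})
		return err
	}, key)
}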
@@ -1412,7 +1420,7 @@ func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, ke
 		}
 	}
 
-	node, err := c.slotMasterNode(slot)
+	node, err := c.slotMasterNode(ctx, slot)
 	if err != nil {
 		return err
 	}
@@ -1424,12 +1432,12 @@ func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, ke
 			}
 		}
 
-		err = node.Client.WatchContext(ctx, fn, keys...)
+		err = node.Client.Watch(ctx, fn, keys...)
 		if err == nil {
 			break
 		}
 		if err != Nil {
-			c.state.LazyReload()
+			c.state.LazyReload(ctx)
 		}
 
 		moved, ask, addr := isMovedError(err)
@@ -1442,7 +1450,7 @@ func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, ke
 		}
 
 		if err == pool.ErrClosed || isReadOnlyError(err) {
-			node, err = c.slotMasterNode(slot)
+			node, err = c.slotMasterNode(ctx, slot)
 			if err != nil {
 				return err
 			}
@@ -1464,7 +1472,7 @@ func (c *ClusterClient) pubSub() *PubSub {
 	pubsub := &PubSub{
 		opt: c.opt.clientOptions(),
 
-		newConn: func(channels []string) (*pool.Conn, error) {
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
 			if node != nil {
 				panic("node != nil")
 			}
@@ -1472,7 +1480,7 @@ func (c *ClusterClient) pubSub() *PubSub {
 			var err error
 			if len(channels) > 0 {
 				slot := hashtag.Slot(channels[0])
-				node, err = c.slotMasterNode(slot)
+				node, err = c.slotMasterNode(ctx, slot)
 			} else {
 				node, err = c.nodes.Random()
 			}
@@ -1502,20 +1510,20 @@ func (c *ClusterClient) pubSub() *PubSub {
 
 // Subscribe subscribes the client to the specified channels.
 // Channels can be omitted to create empty subscription.
-func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
 	pubsub := c.pubSub()
 	if len(channels) > 0 {
-		_ = pubsub.Subscribe(channels...)
+		_ = pubsub.Subscribe(ctx, channels...)
 	}
 	return pubsub
 }
 
 // PSubscribe subscribes the client to the given patterns.
 // Patterns can be omitted to create empty subscription.
-func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
 	pubsub := c.pubSub()
 	if len(channels) > 0 {
-		_ = pubsub.PSubscribe(channels...)
+		_ = pubsub.PSubscribe(ctx, channels...)
 	}
 	return pubsub
 }
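Subscribe and PSubscribe now take the context used for the initial subscription, and receiving messages in v8 takes a context as well. A consumer sketch (channel name is a placeholder):

func consume(ctx context.Context, rdb *redis.ClusterClient) error {
	pubsub := rdb.Subscribe(ctx, "events")
	defer pubsub.Close()

	for {
		// Blocks until a message arrives or ctx is done.
		msg, err := pubsub.ReceiveMessage(ctx)
		if err != nil {
			return err
		}
		fmt.Println(msg.Channel, msg.Payload)
	}
}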
@@ -1540,7 +1548,7 @@ func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) {
 			continue
 		}
 
-		info, err := node.Client.Command().Result()
+		info, err := node.Client.Command(context.TODO()).Result()
 		if err == nil {
 			return info, nil
 		}
@@ -1582,8 +1590,12 @@ func cmdSlot(cmd Cmder, pos int) int {
 	return hashtag.Slot(firstKey)
 }
 
-func (c *ClusterClient) cmdNode(cmdInfo *CommandInfo, slot int) (*clusterNode, error) {
-	state, err := c.state.Get()
+func (c *ClusterClient) cmdNode(
+	ctx context.Context,
+	cmdInfo *CommandInfo,
+	slot int,
+) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -1604,8 +1616,8 @@ func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*cluste
 	return state.slotSlaveNode(slot)
 }
 
-func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) {
-	state, err := c.state.Get()
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
 	if err != nil {
 		return nil, err
 	}