mirror of https://github.com/redis/go-redis.git synced 2025-07-28 06:42:00 +03:00

Merge branch 'master' into ndyakov/token-based-auth

Nedyalko Dyakov, 2025-04-22 12:05:06 +03:00 (committed via GitHub)
11 changed files with 682 additions and 333 deletions

View File

@@ -8,7 +8,7 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Check Spelling
-       uses: rojopolis/spellcheck-github-actions@0.47.0
+       uses: rojopolis/spellcheck-github-actions@0.48.0
        with:
          config_path: .github/spellcheck-settings.yml
          task_name: Markdown

View File

@@ -3831,7 +3831,8 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error {
}

// -----------------------------------------------------------------------
-// MapStringInterfaceCmd represents a command that returns a map of strings to interface{}.
+// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}.
type MapMapStringInterfaceCmd struct {
	baseCmd

	val map[string]interface{}

View File

@@ -330,7 +330,7 @@ func (info LibraryInfo) Validate() error {
	return nil
}

-// Hello Set the resp protocol used.
+// Hello sets the resp protocol used.
func (c statefulCmdable) Hello(ctx context.Context,
	ver int, username, password, clientName string,
) *MapStringInterfaceCmd {

View File

@@ -152,6 +152,32 @@ func ExampleClient_search_json() {
// >>> Tel Aviv
// STEP_END
// STEP_START query2count_only
citiesResult2, err := rdb.FTSearchWithArgs(
ctx,
"idx:users",
"Paul",
&redis.FTSearchOptions{
Return: []redis.FTSearchReturn{
{
FieldName: "$.city",
As: "city",
},
},
CountOnly: true,
},
).Result()
if err != nil {
panic(err)
}
// The `Total` field has the correct number of docs found
// by the query but the `Docs` slice is empty.
fmt.Println(len(citiesResult2.Docs)) // >>> 0
fmt.Println(citiesResult2.Total) // >>> 2
// STEP_END
// STEP_START query3
aggOptions := redis.FTAggregateOptions{
GroupBy: []redis.FTAggregateGroupBy{
@@ -196,6 +222,8 @@ func ExampleClient_search_json() {
// {1 [{user:3 <nil> <nil> <nil> map[$:{"age":35,"city":"Tel Aviv","email":"paul.zamir@example.com","name":"Paul Zamir"}]}]}
// London
// Tel Aviv
// 0
// 2
// London - 1
// Tel Aviv - 2
}

View File

@@ -13,6 +13,7 @@ type HashCmdable interface {
	HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd
	HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd
	HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd
+	HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
	HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
	HKeys(ctx context.Context, key string) *StringSliceCmd
	HLen(ctx context.Context, key string) *IntCmd
@@ -479,7 +480,7 @@ func (c cmdable) HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd {
	return cmd
}

-// ExpirationType represents an expiration option for the HGETEX command.
+// HGetEXExpirationType represents an expiration option for the HGETEX command.
type HGetEXExpirationType string

const (
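Not part of the diff — a minimal usage sketch of the HIncrBy method that the HashCmdable interface now declares; the address, hash key, and field name below are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address

	// HINCRBY adds the given integer to a hash field and returns the new value.
	n, err := rdb.HIncrBy(ctx, "page:stats", "views", 1).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("views:", n)
}
```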

View File

@ -352,3 +352,27 @@ var _ = Describe("withConn", func() {
Expect(client.connPool.Len()).To(Equal(1)) Expect(client.connPool.Len()).To(Equal(1))
}) })
}) })
var _ = Describe("ClusterClient", func() {
var client *ClusterClient
BeforeEach(func() {
client = &ClusterClient{}
})
Describe("cmdSlot", func() {
It("select slot from args for GETKEYSINSLOT command", func() {
cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", 100, 200)
slot := client.cmdSlot(context.Background(), cmd)
Expect(slot).To(Equal(100))
})
It("select slot from args for COUNTKEYSINSLOT command", func() {
cmd := NewStringSliceCmd(ctx, "cluster", "countkeysinslot", 100)
slot := client.cmdSlot(context.Background(), cmd)
Expect(slot).To(Equal(100))
})
})
})

View File

@@ -1859,7 +1859,7 @@ func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {

func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
	args := cmd.Args()
-	if args[0] == "cluster" && args[1] == "getkeysinslot" {
+	if args[0] == "cluster" && (args[1] == "getkeysinslot" || args[1] == "countkeysinslot") {
		return args[2].(int)
	}

View File

@@ -114,6 +114,7 @@ type SpellCheckTerms struct {
}

type FTExplainOptions struct {
+	// Dialect 1,3 and 4 are deprecated since redis 8.0
	Dialect string
}

@@ -261,6 +262,7 @@ type FTAggregateOptions struct {
	WithCursor bool
	WithCursorOptions *FTAggregateWithCursor
	Params map[string]interface{}
+	// Dialect 1,3 and 4 are deprecated since redis 8.0
	DialectVersion int
}

@@ -320,7 +322,11 @@ type FTSearchOptions struct {
	SortByWithCount bool
	LimitOffset int
	Limit int
+	// CountOnly sets LIMIT 0 0 to get the count - number of documents in the result set without actually returning the result set.
+	// When using this option, the Limit and LimitOffset options are ignored.
+	CountOnly bool
	Params map[string]interface{}
+	// Dialect 1,3 and 4 are deprecated since redis 8.0
	DialectVersion int
}

@@ -437,6 +443,7 @@ type IndexDefinition struct {
type FTSpellCheckOptions struct {
	Distance int
	Terms *FTSpellCheckTerms
+	// Dialect 1,3 and 4 are deprecated since redis 8.0
	Dialect int
}
@@ -1954,9 +1961,13 @@ func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd {
			args = append(args, "WITHCOUNT")
		}
	}
-	if options.LimitOffset >= 0 && options.Limit > 0 {
-		args = append(args, "LIMIT", options.LimitOffset, options.Limit)
-	}
+	if options.CountOnly {
+		args = append(args, "LIMIT", 0, 0)
+	} else {
+		if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 {
+			args = append(args, "LIMIT", options.LimitOffset, options.Limit)
+		}
+	}
	if options.Params != nil {
		args = append(args, "PARAMS", len(options.Params)*2)
		for key, value := range options.Params {
@@ -2090,216 +2101,3 @@ func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd {
	_ = c(ctx, cmd)
	return cmd
}
// TODO: remove FTProfile
// type FTProfileResult struct {
// Results []interface{}
// Profile ProfileDetails
// }
// type ProfileDetails struct {
// TotalProfileTime string
// ParsingTime string
// PipelineCreationTime string
// Warning string
// IteratorsProfile []IteratorProfile
// ResultProcessorsProfile []ResultProcessorProfile
// }
// type IteratorProfile struct {
// Type string
// QueryType string
// Time interface{}
// Counter int
// Term string
// Size int
// ChildIterators []IteratorProfile
// }
// type ResultProcessorProfile struct {
// Type string
// Time interface{}
// Counter int
// }
// func parseFTProfileResult(data []interface{}) (FTProfileResult, error) {
// var result FTProfileResult
// if len(data) < 2 {
// return result, fmt.Errorf("unexpected data length")
// }
// // Parse results
// result.Results = data[0].([]interface{})
// // Parse profile details
// profileData := data[1].([]interface{})
// profileDetails := ProfileDetails{}
// for i := 0; i < len(profileData); i += 2 {
// switch profileData[i].(string) {
// case "Total profile time":
// profileDetails.TotalProfileTime = profileData[i+1].(string)
// case "Parsing time":
// profileDetails.ParsingTime = profileData[i+1].(string)
// case "Pipeline creation time":
// profileDetails.PipelineCreationTime = profileData[i+1].(string)
// case "Warning":
// profileDetails.Warning = profileData[i+1].(string)
// case "Iterators profile":
// profileDetails.IteratorsProfile = parseIteratorsProfile(profileData[i+1].([]interface{}))
// case "Result processors profile":
// profileDetails.ResultProcessorsProfile = parseResultProcessorsProfile(profileData[i+1].([]interface{}))
// }
// }
// result.Profile = profileDetails
// return result, nil
// }
// func parseIteratorsProfile(data []interface{}) []IteratorProfile {
// var iterators []IteratorProfile
// for _, item := range data {
// profile := item.([]interface{})
// iterator := IteratorProfile{}
// for i := 0; i < len(profile); i += 2 {
// switch profile[i].(string) {
// case "Type":
// iterator.Type = profile[i+1].(string)
// case "Query type":
// iterator.QueryType = profile[i+1].(string)
// case "Time":
// iterator.Time = profile[i+1]
// case "Counter":
// iterator.Counter = int(profile[i+1].(int64))
// case "Term":
// iterator.Term = profile[i+1].(string)
// case "Size":
// iterator.Size = int(profile[i+1].(int64))
// case "Child iterators":
// iterator.ChildIterators = parseChildIteratorsProfile(profile[i+1].([]interface{}))
// }
// }
// iterators = append(iterators, iterator)
// }
// return iterators
// }
// func parseChildIteratorsProfile(data []interface{}) []IteratorProfile {
// var iterators []IteratorProfile
// for _, item := range data {
// profile := item.([]interface{})
// iterator := IteratorProfile{}
// for i := 0; i < len(profile); i += 2 {
// switch profile[i].(string) {
// case "Type":
// iterator.Type = profile[i+1].(string)
// case "Query type":
// iterator.QueryType = profile[i+1].(string)
// case "Time":
// iterator.Time = profile[i+1]
// case "Counter":
// iterator.Counter = int(profile[i+1].(int64))
// case "Term":
// iterator.Term = profile[i+1].(string)
// case "Size":
// iterator.Size = int(profile[i+1].(int64))
// }
// }
// iterators = append(iterators, iterator)
// }
// return iterators
// }
// func parseResultProcessorsProfile(data []interface{}) []ResultProcessorProfile {
// var processors []ResultProcessorProfile
// for _, item := range data {
// profile := item.([]interface{})
// processor := ResultProcessorProfile{}
// for i := 0; i < len(profile); i += 2 {
// switch profile[i].(string) {
// case "Type":
// processor.Type = profile[i+1].(string)
// case "Time":
// processor.Time = profile[i+1]
// case "Counter":
// processor.Counter = int(profile[i+1].(int64))
// }
// }
// processors = append(processors, processor)
// }
// return processors
// }
// func NewFTProfileCmd(ctx context.Context, args ...interface{}) *FTProfileCmd {
// return &FTProfileCmd{
// baseCmd: baseCmd{
// ctx: ctx,
// args: args,
// },
// }
// }
// type FTProfileCmd struct {
// baseCmd
// val FTProfileResult
// }
// func (cmd *FTProfileCmd) String() string {
// return cmdString(cmd, cmd.val)
// }
// func (cmd *FTProfileCmd) SetVal(val FTProfileResult) {
// cmd.val = val
// }
// func (cmd *FTProfileCmd) Result() (FTProfileResult, error) {
// return cmd.val, cmd.err
// }
// func (cmd *FTProfileCmd) Val() FTProfileResult {
// return cmd.val
// }
// func (cmd *FTProfileCmd) readReply(rd *proto.Reader) (err error) {
// data, err := rd.ReadSlice()
// if err != nil {
// return err
// }
// cmd.val, err = parseFTProfileResult(data)
// if err != nil {
// cmd.err = err
// }
// return nil
// }
// // FTProfile - Executes a search query and returns a profile of how the query was processed.
// // The 'index' parameter specifies the index to search, the 'limited' parameter specifies whether to limit the results,
// // and the 'query' parameter specifies the search / aggreagte query. Please notice that you must either pass a SearchQuery or an AggregateQuery.
// // For more information, please refer to the Redis documentation:
// // [FT.PROFILE]: (https://redis.io/commands/ft.profile/)
// func (c cmdable) FTProfile(ctx context.Context, index string, limited bool, query interface{}) *FTProfileCmd {
// queryType := ""
// var argsQuery []interface{}
// switch v := query.(type) {
// case AggregateQuery:
// queryType = "AGGREGATE"
// argsQuery = v
// case SearchQuery:
// queryType = "SEARCH"
// argsQuery = v
// default:
// panic("FT.PROFILE: query must be either AggregateQuery or SearchQuery")
// }
// args := []interface{}{"FT.PROFILE", index, queryType}
// if limited {
// args = append(args, "LIMITED")
// }
// args = append(args, "QUERY")
// args = append(args, argsQuery...)
// cmd := NewFTProfileCmd(ctx, args...)
// _ = c(ctx, cmd)
// return cmd
// }

View File

@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"strconv"
+	"strings"
	"time"

	. "github.com/bsm/ginkgo/v2"
@@ -1683,6 +1684,520 @@ var _ = Describe("RediSearch commands Resp 2", Label("search"), func() {
		Expect(resUint8.Docs[0].ID).To(BeEquivalentTo("doc1"))
	})
It("should fail when using a non-zero offset with a zero limit", Label("search", "ftsearch"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "testIdx", &redis.FTCreateOptions{}, &redis.FieldSchema{
FieldName: "txt",
FieldType: redis.SearchFieldTypeText,
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "testIdx")
client.HSet(ctx, "doc1", "txt", "hello world")
// Attempt to search with a non-zero offset and zero limit.
_, err = client.FTSearchWithArgs(ctx, "testIdx", "hello", &redis.FTSearchOptions{
LimitOffset: 5,
Limit: 0,
}).Result()
Expect(err).To(HaveOccurred())
})
It("should evaluate exponentiation precedence in APPLY expressions correctly", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "txns", &redis.FTCreateOptions{}, &redis.FieldSchema{
FieldName: "dummy",
FieldType: redis.SearchFieldTypeText,
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "txns")
client.HSet(ctx, "doc1", "dummy", "dummy")
correctOptions := &redis.FTAggregateOptions{
Apply: []redis.FTAggregateApply{
{Field: "(2*3^2)", As: "Value"},
},
Limit: 1,
LimitOffset: 0,
}
correctRes, err := client.FTAggregateWithArgs(ctx, "txns", "*", correctOptions).Result()
Expect(err).NotTo(HaveOccurred())
Expect(correctRes.Rows[0].Fields["Value"]).To(BeEquivalentTo("18"))
})
It("should return a syntax error when empty strings are used for numeric parameters", Label("search", "ftsearch"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "idx", &redis.FTCreateOptions{}, &redis.FieldSchema{
FieldName: "n",
FieldType: redis.SearchFieldTypeNumeric,
}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "idx")
client.HSet(ctx, "doc1", "n", 0)
_, err = client.FTSearchWithArgs(ctx, "idx", "*", &redis.FTSearchOptions{
Filters: []redis.FTSearchFilter{{
FieldName: "n",
Min: "",
Max: "",
}},
DialectVersion: 2,
}).Result()
Expect(err).To(HaveOccurred())
})
It("should return NaN as default for AVG reducer when no numeric values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestAvg", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestAvg")
client.HSet(ctx, "doc1", "grp", "g1")
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchAvg, Args: []interface{}{"@n"}, As: "avg"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestAvg", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["avg"]).To(SatisfyAny(Equal("nan"), Equal("NaN")))
})
It("should return 1 as default for COUNT reducer when no numeric values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestCount", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestCount")
client.HSet(ctx, "doc1", "grp", "g1")
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchCount, As: "cnt"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestCount", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["cnt"]).To(BeEquivalentTo("1"))
})
It("should return NaN as default for SUM reducer when no numeric values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestSum", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestSum")
client.HSet(ctx, "doc1", "grp", "g1")
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchSum, Args: []interface{}{"@n"}, As: "sum"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestSum", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["sum"]).To(SatisfyAny(Equal("nan"), Equal("NaN")))
})
It("should return the full requested number of results by re-running the query when some results expire", Label("search", "ftsearch"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggExpired", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "order", FieldType: redis.SearchFieldTypeNumeric, Sortable: true},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggExpired")
for i := 1; i <= 15; i++ {
key := fmt.Sprintf("doc%d", i)
_, err := client.HSet(ctx, key, "order", i).Result()
Expect(err).NotTo(HaveOccurred())
}
_, err = client.Del(ctx, "doc3", "doc7").Result()
Expect(err).NotTo(HaveOccurred())
options := &redis.FTSearchOptions{
SortBy: []redis.FTSearchSortBy{{FieldName: "order", Asc: true}},
LimitOffset: 0,
Limit: 10,
}
res, err := client.FTSearchWithArgs(ctx, "aggExpired", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(len(res.Docs)).To(BeEquivalentTo(10))
for _, doc := range res.Docs {
Expect(doc.ID).ToNot(Or(Equal("doc3"), Equal("doc7")))
}
})
It("should stop processing and return an error when a timeout occurs", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTimeoutHeavy", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric, Sortable: true},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTimeoutHeavy")
const totalDocs = 10000
for i := 0; i < totalDocs; i++ {
key := fmt.Sprintf("doc%d", i)
_, err := client.HSet(ctx, key, "n", i).Result()
Expect(err).NotTo(HaveOccurred())
}
options := &redis.FTAggregateOptions{
SortBy: []redis.FTAggregateSortBy{{FieldName: "@n", Desc: true}},
LimitOffset: 0,
Limit: 100,
Timeout: 1, // 1 ms timeout, expected to trigger a timeout error.
}
_, err = client.FTAggregateWithArgs(ctx, "aggTimeoutHeavy", "*", options).Result()
Expect(err).To(HaveOccurred())
Expect(strings.ToLower(err.Error())).To(ContainSubstring("timeout"))
})
It("should return 0 as default for COUNT_DISTINCT reducer when no values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestCountDistinct", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "x", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestCountDistinct")
client.HSet(ctx, "doc1", "grp", "g1")
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchCountDistinct, Args: []interface{}{"@x"}, As: "distinct_count"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestCountDistinct", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["distinct_count"]).To(BeEquivalentTo("0"))
})
It("should return 0 as default for COUNT_DISTINCTISH reducer when no values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestCountDistinctIsh", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "y", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestCountDistinctIsh")
_, err = client.HSet(ctx, "doc1", "grp", "g1").Result()
Expect(err).NotTo(HaveOccurred())
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchCountDistinctish, Args: []interface{}{"@y"}, As: "distinctish_count"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestCountDistinctIsh", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["distinctish_count"]).To(BeEquivalentTo("0"))
})
It("should use BM25 as the default scorer", Label("search", "ftsearch"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "scoringTest", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "description", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "scoringTest")
_, err = client.HSet(ctx, "doc1", "description", "red apple").Result()
Expect(err).NotTo(HaveOccurred())
_, err = client.HSet(ctx, "doc2", "description", "green apple").Result()
Expect(err).NotTo(HaveOccurred())
resDefault, err := client.FTSearchWithArgs(ctx, "scoringTest", "apple", &redis.FTSearchOptions{WithScores: true}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(resDefault.Total).To(BeNumerically(">", 0))
resBM25, err := client.FTSearchWithArgs(ctx, "scoringTest", "apple", &redis.FTSearchOptions{WithScores: true, Scorer: "BM25"}).Result()
Expect(err).NotTo(HaveOccurred())
Expect(resBM25.Total).To(BeNumerically(">", 0))
Expect(resDefault.Total).To(BeEquivalentTo(resBM25.Total))
Expect(resDefault.Docs[0].ID).To(BeElementOf("doc1", "doc2"))
Expect(resDefault.Docs[1].ID).To(BeElementOf("doc1", "doc2"))
})
It("should return 0 as default for STDDEV reducer when no numeric values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestStddev", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestStddev")
_, err = client.HSet(ctx, "doc1", "grp", "g1").Result()
Expect(err).NotTo(HaveOccurred())
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchStdDev, Args: []interface{}{"@n"}, As: "stddev"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestStddev", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["stddev"]).To(BeEquivalentTo("0"))
})
It("should return NaN as default for QUANTILE reducer when no numeric values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestQuantile", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestQuantile")
_, err = client.HSet(ctx, "doc1", "grp", "g1").Result()
Expect(err).NotTo(HaveOccurred())
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchQuantile, Args: []interface{}{"@n", 0.5}, As: "quantile"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestQuantile", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["quantile"]).To(SatisfyAny(Equal("nan"), Equal("NaN")))
})
It("should return nil as default for FIRST_VALUE reducer when no values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestFirstValue", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestFirstValue")
_, err = client.HSet(ctx, "doc1", "grp", "g1").Result()
Expect(err).NotTo(HaveOccurred())
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchFirstValue, Args: []interface{}{"@t"}, As: "first_val"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestFirstValue", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["first_val"]).To(BeNil())
})
It("should fail to add an alias that is an existing index name", Label("search", "ftalias"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "idx1")
val, err = client.FTCreate(ctx, "idx2", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "idx2")
_, err = client.FTAliasAdd(ctx, "idx2", "idx1").Result()
Expect(err).To(HaveOccurred())
Expect(strings.ToLower(err.Error())).To(ContainSubstring("alias"))
})
It("should test ft.search with CountOnly param", Label("search", "ftsearch"), func() {
val, err := client.FTCreate(ctx, "txtIndex", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "txtIndex")
_, err = client.HSet(ctx, "doc1", "txt", "hello world").Result()
Expect(err).NotTo(HaveOccurred())
_, err = client.HSet(ctx, "doc2", "txt", "hello go").Result()
Expect(err).NotTo(HaveOccurred())
_, err = client.HSet(ctx, "doc3", "txt", "hello redis").Result()
Expect(err).NotTo(HaveOccurred())
optsCountOnly := &redis.FTSearchOptions{
CountOnly: true,
LimitOffset: 0,
Limit: 2, // even though we limit to 2, with count-only no docs are returned
DialectVersion: 2,
}
resCountOnly, err := client.FTSearchWithArgs(ctx, "txtIndex", "hello", optsCountOnly).Result()
Expect(err).NotTo(HaveOccurred())
Expect(resCountOnly.Total).To(BeEquivalentTo(3))
Expect(len(resCountOnly.Docs)).To(BeEquivalentTo(0))
optsLimit := &redis.FTSearchOptions{
CountOnly: false,
LimitOffset: 0,
Limit: 2, // we expect to get 2 documents even though total count is 3
DialectVersion: 2,
}
resLimit, err := client.FTSearchWithArgs(ctx, "txtIndex", "hello", optsLimit).Result()
Expect(err).NotTo(HaveOccurred())
Expect(resLimit.Total).To(BeEquivalentTo(3))
Expect(len(resLimit.Docs)).To(BeEquivalentTo(2))
})
It("should reject deprecated configuration keys", Label("search", "ftconfig"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
// List of deprecated configuration keys.
deprecatedKeys := []string{
"_FREE_RESOURCE_ON_THREAD",
"_NUMERIC_COMPRESS",
"_NUMERIC_RANGES_PARENTS",
"_PRINT_PROFILE_CLOCK",
"_PRIORITIZE_INTERSECT_UNION_CHILDREN",
"BG_INDEX_SLEEP_GAP",
"CONN_PER_SHARD",
"CURSOR_MAX_IDLE",
"CURSOR_REPLY_THRESHOLD",
"DEFAULT_DIALECT",
"EXTLOAD",
"FORK_GC_CLEAN_THRESHOLD",
"FORK_GC_RETRY_INTERVAL",
"FORK_GC_RUN_INTERVAL",
"FORKGC_SLEEP_BEFORE_EXIT",
"FRISOINI",
"GC_POLICY",
"GCSCANSIZE",
"INDEX_CURSOR_LIMIT",
"MAXAGGREGATERESULTS",
"MAXDOCTABLESIZE",
"MAXPREFIXEXPANSIONS",
"MAXSEARCHRESULTS",
"MIN_OPERATION_WORKERS",
"MIN_PHONETIC_TERM_LEN",
"MINPREFIX",
"MINSTEMLEN",
"NO_MEM_POOLS",
"NOGC",
"ON_TIMEOUT",
"MULTI_TEXT_SLOP",
"PARTIAL_INDEXED_DOCS",
"RAW_DOCID_ENCODING",
"SEARCH_THREADS",
"TIERED_HNSW_BUFFER_LIMIT",
"TIMEOUT",
"TOPOLOGY_VALIDATION_TIMEOUT",
"UNION_ITERATOR_HEAP",
"VSS_MAX_RESIZE",
"WORKERS",
"WORKERS_PRIORITY_BIAS_THRESHOLD",
"MT_MODE",
"WORKER_THREADS",
}
for _, key := range deprecatedKeys {
_, err := client.FTConfigSet(ctx, key, "test_value").Result()
Expect(err).To(HaveOccurred())
}
val, err := client.ConfigGet(ctx, "*").Result()
Expect(err).NotTo(HaveOccurred())
// Since FT.CONFIG is deprecated since redis 8, use CONFIG instead with new search parameters.
keys := make([]string, 0, len(val))
for key := range val {
keys = append(keys, key)
}
Expect(keys).To(ContainElement(ContainSubstring("search")))
})
It("should return INF for MIN reducer and -INF for MAX reducer when no numeric values are present", Label("search", "ftaggregate"), func() {
SkipBeforeRedisVersion(7.9, "requires Redis 8.x")
val, err := client.FTCreate(ctx, "aggTestMinMax", &redis.FTCreateOptions{},
&redis.FieldSchema{FieldName: "grp", FieldType: redis.SearchFieldTypeText},
&redis.FieldSchema{FieldName: "n", FieldType: redis.SearchFieldTypeNumeric},
).Result()
Expect(err).NotTo(HaveOccurred())
Expect(val).To(BeEquivalentTo("OK"))
WaitForIndexing(client, "aggTestMinMax")
_, err = client.HSet(ctx, "doc1", "grp", "g1").Result()
Expect(err).NotTo(HaveOccurred())
reducers := []redis.FTAggregateReducer{
{Reducer: redis.SearchMin, Args: []interface{}{"@n"}, As: "minValue"},
{Reducer: redis.SearchMax, Args: []interface{}{"@n"}, As: "maxValue"},
}
groupBy := []redis.FTAggregateGroupBy{
{Fields: []interface{}{"@grp"}, Reduce: reducers},
}
options := &redis.FTAggregateOptions{GroupBy: groupBy}
res, err := client.FTAggregateWithArgs(ctx, "aggTestMinMax", "*", options).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res.Rows).ToNot(BeEmpty())
Expect(res.Rows[0].Fields["minValue"]).To(BeEquivalentTo("inf"))
Expect(res.Rows[0].Fields["maxValue"]).To(BeEquivalentTo("-inf"))
})
})

func _assert_geosearch_result(result *redis.FTSearchResult, expectedDocIDs []string) {
@@ -1694,96 +2209,6 @@ func _assert_geosearch_result(result *redis.FTSearchResult, expectedDocIDs []string) {
	Expect(result.Total).To(BeEquivalentTo(len(expectedDocIDs)))
}
// It("should FTProfile Search and Aggregate", Label("search", "ftprofile"), func() {
// val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeText}).Result()
// Expect(err).NotTo(HaveOccurred())
// Expect(val).To(BeEquivalentTo("OK"))
// WaitForIndexing(client, "idx1")
// client.HSet(ctx, "1", "t", "hello")
// client.HSet(ctx, "2", "t", "world")
// // FTProfile Search
// query := redis.FTSearchQuery("hello|world", &redis.FTSearchOptions{NoContent: true})
// res1, err := client.FTProfile(ctx, "idx1", false, query).Result()
// Expect(err).NotTo(HaveOccurred())
// panic(res1)
// Expect(len(res1["results"].([]interface{}))).To(BeEquivalentTo(3))
// resProfile := res1["profile"].(map[interface{}]interface{})
// Expect(resProfile["Parsing time"].(float64) < 0.5).To(BeTrue())
// iterProfile0 := resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{})
// Expect(iterProfile0["Counter"]).To(BeEquivalentTo(2.0))
// Expect(iterProfile0["Type"]).To(BeEquivalentTo("UNION"))
// // FTProfile Aggregate
// aggQuery := redis.FTAggregateQuery("*", &redis.FTAggregateOptions{
// Load: []redis.FTAggregateLoad{{Field: "t"}},
// Apply: []redis.FTAggregateApply{{Field: "startswith(@t, 'hel')", As: "prefix"}}})
// res2, err := client.FTProfile(ctx, "idx1", false, aggQuery).Result()
// Expect(err).NotTo(HaveOccurred())
// Expect(len(res2["results"].([]interface{}))).To(BeEquivalentTo(2))
// resProfile = res2["profile"].(map[interface{}]interface{})
// iterProfile0 = resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{})
// Expect(iterProfile0["Counter"]).To(BeEquivalentTo(2))
// Expect(iterProfile0["Type"]).To(BeEquivalentTo("WILDCARD"))
// })
// It("should FTProfile Search Limited", Label("search", "ftprofile"), func() {
// val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeText}).Result()
// Expect(err).NotTo(HaveOccurred())
// Expect(val).To(BeEquivalentTo("OK"))
// WaitForIndexing(client, "idx1")
// client.HSet(ctx, "1", "t", "hello")
// client.HSet(ctx, "2", "t", "hell")
// client.HSet(ctx, "3", "t", "help")
// client.HSet(ctx, "4", "t", "helowa")
// // FTProfile Search
// query := redis.FTSearchQuery("%hell% hel*", &redis.FTSearchOptions{})
// res1, err := client.FTProfile(ctx, "idx1", true, query).Result()
// Expect(err).NotTo(HaveOccurred())
// resProfile := res1["profile"].(map[interface{}]interface{})
// iterProfile0 := resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{})
// Expect(iterProfile0["Type"]).To(BeEquivalentTo("INTERSECT"))
// Expect(len(res1["results"].([]interface{}))).To(BeEquivalentTo(3))
// Expect(iterProfile0["Child iterators"].([]interface{})[0].(map[interface{}]interface{})["Child iterators"]).To(BeEquivalentTo("The number of iterators in the union is 3"))
// Expect(iterProfile0["Child iterators"].([]interface{})[1].(map[interface{}]interface{})["Child iterators"]).To(BeEquivalentTo("The number of iterators in the union is 4"))
// })
// It("should FTProfile Search query params", Label("search", "ftprofile"), func() {
// hnswOptions := &redis.FTHNSWOptions{Type: "FLOAT32", Dim: 2, DistanceMetric: "L2"}
// val, err := client.FTCreate(ctx, "idx1",
// &redis.FTCreateOptions{},
// &redis.FieldSchema{FieldName: "v", FieldType: redis.SearchFieldTypeVector, VectorArgs: &redis.FTVectorArgs{HNSWOptions: hnswOptions}}).Result()
// Expect(err).NotTo(HaveOccurred())
// Expect(val).To(BeEquivalentTo("OK"))
// WaitForIndexing(client, "idx1")
// client.HSet(ctx, "a", "v", "aaaaaaaa")
// client.HSet(ctx, "b", "v", "aaaabaaa")
// client.HSet(ctx, "c", "v", "aaaaabaa")
// // FTProfile Search
// searchOptions := &redis.FTSearchOptions{
// Return: []redis.FTSearchReturn{{FieldName: "__v_score"}},
// SortBy: []redis.FTSearchSortBy{{FieldName: "__v_score", Asc: true}},
// DialectVersion: 2,
// Params: map[string]interface{}{"vec": "aaaaaaaa"},
// }
// query := redis.FTSearchQuery("*=>[KNN 2 @v $vec]", searchOptions)
// res1, err := client.FTProfile(ctx, "idx1", false, query).Result()
// Expect(err).NotTo(HaveOccurred())
// resProfile := res1["profile"].(map[interface{}]interface{})
// iterProfile0 := resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{})
// Expect(iterProfile0["Counter"]).To(BeEquivalentTo(2))
// Expect(iterProfile0["Type"]).To(BeEquivalentTo(redis.SearchFieldTypeVector.String()))
// Expect(res1["total_results"]).To(BeEquivalentTo(2))
// results0 := res1["results"].([]interface{})[0].(map[interface{}]interface{})
// Expect(results0["id"]).To(BeEquivalentTo("a"))
// Expect(results0["extra_attributes"].(map[interface{}]interface{})["__v_score"]).To(BeEquivalentTo("0"))
// })
var _ = Describe("RediSearch FT.Config with Resp2 and Resp3", Label("search", "NonRedisEnterprise"), func() { var _ = Describe("RediSearch FT.Config with Resp2 and Resp3", Label("search", "NonRedisEnterprise"), func() {
var clientResp2 *redis.Client var clientResp2 *redis.Client

View File

@@ -4,6 +4,7 @@ import (
	"context"
	"crypto/tls"
	"errors"
+	"fmt"
	"net"
	"strings"
	"sync"
@@ -566,29 +567,50 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
		}
	}

+	var (
+		masterAddr string
+		wg         sync.WaitGroup
+		once       sync.Once
+		errCh      = make(chan error, len(c.sentinelAddrs))
+	)
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
	for i, sentinelAddr := range c.sentinelAddrs {
-		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
-
-		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
-		if err != nil {
-			_ = sentinel.Close()
-			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
-				return "", err
-			}
-			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
-				c.opt.MasterName, err)
-			continue
-		}
-
-		// Push working sentinel to the top.
-		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
-		c.setSentinel(ctx, sentinel)
-
-		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
-		return addr, nil
+		wg.Add(1)
+		go func(i int, addr string) {
+			defer wg.Done()
+			sentinelCli := NewSentinelClient(c.opt.sentinelOptions(addr))
+			addrVal, err := sentinelCli.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+			if err != nil {
+				internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName addr=%s, master=%q failed: %s",
+					addr, c.opt.MasterName, err)
+				_ = sentinelCli.Close()
+				errCh <- err
+				return
+			}
+			once.Do(func() {
+				masterAddr = net.JoinHostPort(addrVal[0], addrVal[1])
+				// Push working sentinel to the top
+				c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+				c.setSentinel(ctx, sentinelCli)
+				internal.Logger.Printf(ctx, "sentinel: selected addr=%s masterAddr=%s", addr, masterAddr)
+				cancel()
+			})
+		}(i, sentinelAddr)
	}

-	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+	wg.Wait()
+	close(errCh)
+
+	if masterAddr != "" {
+		return masterAddr, nil
+	}
+
+	errs := make([]error, 0, len(errCh))
+	for err := range errCh {
+		errs = append(errs, err)
+	}
+	return "", fmt.Errorf("redis: all sentinels specified in configuration are unreachable: %w", errors.Join(errs...))
}

func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
@@ -815,6 +837,22 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
	}

	opt := failoverOpt.clusterOptions()
+	if failoverOpt.DB != 0 {
+		onConnect := opt.OnConnect
+		opt.OnConnect = func(ctx context.Context, cn *Conn) error {
+			if err := cn.Select(ctx, failoverOpt.DB).Err(); err != nil {
+				return err
+			}
+			if onConnect != nil {
+				return onConnect(ctx, cn)
+			}
+			return nil
+		}
+	}
+
	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
		masterAddr, err := failover.MasterAddr(ctx)
		if err != nil {
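Not part of the diff — a minimal caller-side sketch of the new DB handling in NewFailoverClusterClient; the master name and sentinel address are placeholders:

```go
package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()

	// With DB != 0, every new connection in the failover cluster client
	// issues SELECT 1 before user commands (and before any user-supplied
	// OnConnect hook runs).
	client := redis.NewFailoverClusterClient(&redis.FailoverOptions{
		MasterName:    "mymaster",         // placeholder master name
		SentinelAddrs: []string{":26379"}, // placeholder sentinel address
		DB:            1,
	})
	defer client.Close()

	if err := client.Ping(ctx).Err(); err != nil {
		panic(err)
	}
}
```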

View File

@@ -3,6 +3,7 @@ package redis_test

import (
	"context"
	"net"
+	"time"

	. "github.com/bsm/ginkgo/v2"
	. "github.com/bsm/gomega"
@@ -32,6 +33,24 @@ var _ = Describe("Sentinel PROTO 2", func() {
	})
})
var _ = Describe("Sentinel resolution", func() {
It("should resolve master without context exhaustion", func() {
shortCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
client := redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: sentinelName,
SentinelAddrs: sentinelAddrs,
MaxRetries: -1,
})
err := client.Ping(shortCtx).Err()
Expect(err).NotTo(HaveOccurred(), "expected master to resolve without context exhaustion")
_ = client.Close()
})
})
var _ = Describe("Sentinel", func() { var _ = Describe("Sentinel", func() {
var client *redis.Client var client *redis.Client
var master *redis.Client var master *redis.Client
@ -200,6 +219,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
SentinelAddrs: sentinelAddrs, SentinelAddrs: sentinelAddrs,
RouteRandomly: true, RouteRandomly: true,
DB: 1,
}) })
Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
@ -289,6 +309,20 @@ var _ = Describe("NewFailoverClusterClient", func() {
}) })
}) })
It("should sentinel cluster client db", func() {
err := client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
return c.Ping(ctx).Err()
})
Expect(err).NotTo(HaveOccurred())
_ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
clientInfo, err := c.ClientInfo(ctx).Result()
Expect(err).NotTo(HaveOccurred())
Expect(clientInfo.DB).To(Equal(1))
return nil
})
})
It("should sentinel cluster PROTO 3", func() { It("should sentinel cluster PROTO 3", func() {
_ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error { _ = client.ForEachShard(ctx, func(ctx context.Context, c *redis.Client) error {
val, err := client.Do(ctx, "HELLO").Result() val, err := client.Do(ctx, "HELLO").Result()