mirror of https://github.com/redis/go-redis.git synced 2025-07-18 00:20:57 +03:00

feat: Enable CI for Redis CE 8.0 (#3274)

* chore: extract benchmark tests

* wip

* enable pubsub tests

* enable ring tests

* stop tests that build redis from source

* start all tests

* mix of makefile and action

* add sentinel configs

* fix example test

* stop debug on re

* wip

* enable gears for redis 7.2

* wip

* enable sentinel tests; they are expected to fail

* fix: linter configuration

* chore: update re versions

* return older redis enterprise version

* add basic codeql

* wip: increase timeout, focus only on sentinel tests

* sentinels with docker network host

* enable all tests

* fix flaky test

* enable example tests

* tidy docker compose

* add debug output

* stop shutting down masters

* don't test sentinel for re

* skip unsupported addscores

* Update README

bump go version in CI

* Update README.md

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update CONTRIBUTING.md

add information about new test setup

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Nedyalko Dyakov
2025-02-28 12:49:00 +02:00
committed by GitHub
parent 5314a57132
commit ebe11d06ca
60 changed files with 671 additions and 561 deletions

cluster_test.go

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -19,10 +20,9 @@ import (
 )
 
 type clusterScenario struct {
-	ports     []string
-	nodeIDs   []string
-	processes map[string]*redisProcess
-	clients   map[string]*redis.Client
+	ports   []string
+	nodeIDs []string
+	clients map[string]*redis.Client
 }
 
 func (s *clusterScenario) slots() []int {
@@ -101,20 +101,17 @@ func (s *clusterScenario) Close() error {
 		}
 	}
 
-	for _, port := range s.ports {
-		if process, ok := processes[port]; ok {
-			if process != nil {
-				process.Close()
-			}
-			delete(processes, port)
-		}
-	}
-
 	return nil
 }
 
 func configureClusterTopology(ctx context.Context, scenario *clusterScenario) error {
+	allowErrs := []string{
+		"ERR Slot 0 is already busy",
+		"ERR Slot 5461 is already busy",
+		"ERR Slot 10923 is already busy",
+		"ERR Slot 16384 is already busy",
+	}
+
 	err := collectNodeInformation(ctx, scenario)
 	if err != nil {
 		return err
@@ -131,7 +128,7 @@ func configureClusterTopology(ctx context.Context, scenario *clusterScenario) error {
 	slots := scenario.slots()
 	for pos, master := range scenario.masters() {
 		err := master.ClusterAddSlotsRange(ctx, slots[pos], slots[pos+1]-1).Err()
-		if err != nil {
+		if err != nil && slices.Contains(allowErrs, err.Error()) == false {
 			return err
 		}
 	}
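
An aside on the allowErrs change above: when CLUSTER ADDSLOTS is re-run against a node that already owns its slots (as happens now that the cluster comes up pre-provisioned), Redis replies with "ERR Slot N is already busy", and the hunk treats those replies as benign. A minimal, self-contained Go sketch of the same pattern, with hypothetical names not taken from the commit:

package main

import (
	"errors"
	"fmt"
	"slices"
)

// allowErrs lists server replies that mean the work was already done.
var allowErrs = []string{
	"ERR Slot 0 is already busy",
}

// addSlots runs assign and swallows expected "already busy" replies,
// while still surfacing any genuine failure.
func addSlots(assign func() error) error {
	err := assign()
	if err != nil && !slices.Contains(allowErrs, err.Error()) {
		return err
	}
	return nil
}

func main() {
	err := addSlots(func() error {
		return errors.New("ERR Slot 0 is already busy")
	})
	fmt.Println(err) // <nil>: the expected reply is tolerated
}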
@@ -199,7 +196,7 @@ func configureClusterTopology(ctx context.Context, scenario *clusterScenario) error {
 			return err
 		}
 		return assertSlotsEqual(res, wanted)
-	}, 60*time.Second)
+	}, 90*time.Second)
 	if err != nil {
 		return err
 	}
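
The 60s-to-90s change above is the timeout handed to the suite's eventually-style poller, which retries an assertion until it passes or the deadline expires. A rough sketch of such a helper, assuming nothing beyond the standard library (the suite's real helper may differ):

package main

import (
	"fmt"
	"time"
)

// eventually polls fn until it returns nil or timeout elapses,
// reporting the last error seen on failure.
func eventually(fn func() error, timeout time.Duration) error {
	var err error
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("condition not met within %v: %w", timeout, err)
}

func main() {
	start := time.Now()
	err := eventually(func() error {
		if time.Since(start) < time.Second {
			return fmt.Errorf("cluster state not yet consistent")
		}
		return nil
	}, 90*time.Second)
	fmt.Println(err) // <nil> once the condition holds
}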
@@ -214,31 +211,17 @@ func collectNodeInformation(ctx context.Context, scenario *clusterScenario) error {
 			Addr: ":" + port,
 		})
 
-		info, err := client.ClusterNodes(ctx).Result()
+		myID, err := client.ClusterMyID(ctx).Result()
 		if err != nil {
 			return err
 		}
 
 		scenario.clients[port] = client
-		scenario.nodeIDs[pos] = info[:40]
+		scenario.nodeIDs[pos] = myID
 	}
 
 	return nil
 }
 
-// startCluster start a cluster
-func startCluster(ctx context.Context, scenario *clusterScenario) error {
-	// Start processes and collect node ids
-	for _, port := range scenario.ports {
-		process, err := startRedis(port, "--cluster-enabled", "yes")
-		if err != nil {
-			return err
-		}
-
-		scenario.processes[port] = process
-	}
-
-	return configureClusterTopology(ctx, scenario)
-}
-
 func assertSlotsEqual(slots, wanted []redis.ClusterSlot) error {
 outerLoop:
 	for _, s2 := range wanted {
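
A closing note on the collectNodeInformation hunk: the old code derived a node's ID by slicing the first 40 characters of the raw CLUSTER NODES text, relying on the node's own entry appearing first, while the new code asks the server directly via CLUSTER MYID. A hedged sketch contrasting the two calls; the address is a hypothetical cluster-node port, not one from the commit:

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: ":16600"}) // hypothetical node address

	// Old approach: slice the node ID out of the textual CLUSTER NODES reply.
	if info, err := rdb.ClusterNodes(ctx).Result(); err == nil {
		fmt.Println("parsed ID:", info[:40]) // fragile: assumes the node's own line comes first
	}

	// New approach: CLUSTER MYID returns exactly the 40-character hex ID.
	if myID, err := rdb.ClusterMyID(ctx).Result(); err == nil {
		fmt.Println("reported ID:", myID)
	}
}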