package redis_test

import (
	"bytes"
	"context"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/redis/go-redis/v9"
)

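// benchmarkRedisClient returns a client for the Redis server expected on
// localhost:6379, using the given connection pool size. It flushes the
// current database so every benchmark starts from an empty keyspace.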
func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
	client := redis.NewClient(&redis.Options{
		Addr:         ":6379",
		DialTimeout:  time.Second,
		ReadTimeout:  time.Second,
		WriteTimeout: time.Second,
		PoolSize:     poolSize,
	})
	if err := client.FlushDB(ctx).Err(); err != nil {
		panic(err)
	}
	return client
}

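// BenchmarkRedisPing measures PING round trips issued from parallel
// goroutines sharing a 10-connection pool.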
func BenchmarkRedisPing(b *testing.B) {
	ctx := context.Background()
	rdb := benchmarkRedisClient(ctx, 10)
	defer rdb.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := rdb.Ping(ctx).Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}

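// BenchmarkSetGoroutines measures the cost of firing 1000 concurrent SETs of
// the same key per iteration, one goroutine each, waiting for all of them to
// finish.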
func BenchmarkSetGoroutines(b *testing.B) {
	ctx := context.Background()
	rdb := benchmarkRedisClient(ctx, 10)
	defer rdb.Close()

	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup

		for j := 0; j < 1000; j++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				err := rdb.Set(ctx, "hello", "world", 0).Err()
				if err != nil {
					panic(err)
				}
			}()
		}

		wg.Wait()
	}
}

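// BenchmarkRedisGetNil measures GET on a key that does not exist; the
// expected outcome is redis.Nil, so any other error fails the benchmark.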
func BenchmarkRedisGetNil(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Get(ctx, "key").Err(); err != redis.Nil {
				b.Fatal(err)
			}
		}
	})
}

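// setStringBenchmark describes one SET sub-benchmark as a combination of
// connection pool size and value size; String names the sub-benchmark.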
type setStringBenchmark struct {
	poolSize  int
	valueSize int
}

func (bm setStringBenchmark) String() string {
	return fmt.Sprintf("pool=%d value=%d", bm.poolSize, bm.valueSize)
}

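// BenchmarkRedisSetString measures SET across value sizes from 64 B to
// 10 MiB, with pool sizes of 10 and 100.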
func BenchmarkRedisSetString(b *testing.B) {
	benchmarks := []setStringBenchmark{
		{10, 64},
		{10, 1024},
		{10, 64 * 1024},
		{10, 1024 * 1024},
		{10, 10 * 1024 * 1024},

		{100, 64},
		{100, 1024},
		{100, 64 * 1024},
		{100, 1024 * 1024},
		{100, 10 * 1024 * 1024},
	}
	for _, bm := range benchmarks {
		b.Run(bm.String(), func(b *testing.B) {
			ctx := context.Background()
			client := benchmarkRedisClient(ctx, bm.poolSize)
			defer client.Close()

			value := strings.Repeat("1", bm.valueSize)

			b.ResetTimer()

			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					err := client.Set(ctx, "key", value, 0).Err()
					if err != nil {
						b.Fatal(err)
					}
				}
			})
		})
	}
}

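// BenchmarkRedisSetGetBytes measures a SET/GET round trip of a 10 KB value
// and verifies the bytes read back match the bytes written.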
func BenchmarkRedisSetGetBytes(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	value := bytes.Repeat([]byte{'1'}, 10000)

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Set(ctx, "key", value, 0).Err(); err != nil {
				b.Fatal(err)
			}

			got, err := client.Get(ctx, "key").Bytes()
			if err != nil {
				b.Fatal(err)
			}
			if !bytes.Equal(got, value) {
				b.Fatalf("got != value")
			}
		}
	})
}

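// BenchmarkRedisMGet measures MGET of two pre-populated keys.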
func BenchmarkRedisMGet(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	if err := client.MSet(ctx, "key1", "hello1", "key2", "hello2").Err(); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.MGet(ctx, "key1", "key2").Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}

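// BenchmarkSetExpire measures SET followed by EXPIRE issued as two separate
// round trips; compare BenchmarkPipeline below, which sends the same pair in
// one.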
func BenchmarkSetExpire(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Set(ctx, "key", "hello", 0).Err(); err != nil {
				b.Fatal(err)
			}
			if err := client.Expire(ctx, "key", time.Second).Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}

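// BenchmarkPipeline measures the same SET+EXPIRE pair as BenchmarkSetExpire,
// but batched into a single round trip via Pipelined.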
func BenchmarkPipeline(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
				pipe.Set(ctx, "key", "hello", 0)
				pipe.Expire(ctx, "key", time.Second)
				return nil
			})
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

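// BenchmarkZAdd measures ZADD of a single member into a sorted set.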
func BenchmarkZAdd(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.ZAdd(ctx, "key", redis.Z{
				Score:  float64(1),
				Member: "hello",
			}).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

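// BenchmarkXRead measures appending an entry to a stream with XADD and then
// issuing a blocking XREAD over 16 streams, reporting allocations as well.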
func BenchmarkXRead(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	args := redis.XAddArgs{
		Stream: "1",
		ID:     "*",
		Values: map[string]string{"uno": "dos"},
	}

	lenStreams := 16
	streams := make([]string, 0, lenStreams)
	for i := 0; i < lenStreams; i++ {
		streams = append(streams, strconv.Itoa(i))
	}
	for i := 0; i < lenStreams; i++ {
		streams = append(streams, "0")
	}

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			client.XAdd(ctx, &args)

			err := client.XRead(ctx, &redis.XReadArgs{
				Streams: streams,
				Count:   1,
				Block:   time.Second,
			}).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

//------------------------------------------------------------------------------

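// newClusterScenario returns a fresh six-node cluster fixture on ports
// 16600-16605. The clusterScenario type and its helpers are defined in this
// package's shared test setup.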
func newClusterScenario() *clusterScenario {
	return &clusterScenario{
		ports:   []string{"16600", "16601", "16602", "16603", "16604", "16605"},
		nodeIDs: make([]string, 6),
		clients: make(map[string]*redis.Client, 6),
	}
}

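// clusterBench caches the cluster fixture so its topology is configured at
// most once across the cluster benchmarks below.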
var clusterBench *clusterScenario
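// BenchmarkClusterPing measures PING through the cluster client. Like the
// other cluster benchmarks, it needs the full cluster fixture and is skipped
// in -short mode.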
func BenchmarkClusterPing(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	ctx := context.Background()
	if clusterBench == nil {
		clusterBench = newClusterScenario()
		if err := configureClusterTopology(ctx, clusterBench); err != nil {
			b.Fatal(err)
		}
	}

	client := clusterBench.newClusterClient(ctx, redisClusterOptions())
	defer client.Close()

	b.Run("cluster ping", func(b *testing.B) {
		b.ResetTimer()

		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				err := client.Ping(ctx).Err()
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	})
}

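// BenchmarkClusterDoInt measures a raw SET with integer key and value issued
// through Do, exercising generic argument encoding on the cluster client.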
func BenchmarkClusterDoInt(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	ctx := context.Background()
	if clusterBench == nil {
		clusterBench = newClusterScenario()
		if err := configureClusterTopology(ctx, clusterBench); err != nil {
			b.Fatal(err)
		}
	}

	client := clusterBench.newClusterClient(ctx, redisClusterOptions())
	defer client.Close()

	b.Run("cluster do set int", func(b *testing.B) {
		b.ResetTimer()
		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				err := client.Do(ctx, "SET", 10, 10).Err()
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	})
}

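// BenchmarkClusterSetString measures SET of a 10 KB string value through the
// cluster client.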
func BenchmarkClusterSetString(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	ctx := context.Background()
	if clusterBench == nil {
		clusterBench = newClusterScenario()
		if err := configureClusterTopology(ctx, clusterBench); err != nil {
			b.Fatal(err)
		}
	}

	client := clusterBench.newClusterClient(ctx, redisClusterOptions())
	defer client.Close()

	value := string(bytes.Repeat([]byte{'1'}, 10000))

	b.Run("cluster set string", func(b *testing.B) {
		b.ResetTimer()

		b.RunParallel(func(pb *testing.PB) {
			for pb.Next() {
				err := client.Set(ctx, "key", value, 0).Err()
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	})
}

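// BenchmarkExecRingSetAddrsCmd measures Ring.Ping while a background
// goroutine alternates the shard set between one and two shards via SetAddrs
// every 10ms, exercising commands racing with reconfiguration. The
// ringShard1Port and ringShard2Port values come from this package's test
// setup.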
func BenchmarkExecRingSetAddrsCmd(b *testing.B) {
	const (
		ringShard1Name = "ringShardOne"
		ringShard2Name = "ringShardTwo"
	)

	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			ringShard1Name: ":" + ringShard1Port,
		},
		NewClient: func(opt *redis.Options) *redis.Client {
			// Simulate slow shard creation
			time.Sleep(100 * time.Millisecond)
			return redis.NewClient(opt)
		},
	})
	defer ring.Close()

	if _, err := ring.Ping(context.Background()).Result(); err != nil {
		b.Fatal(err)
	}

	// Continuously update addresses by adding and removing one address
	updatesDone := make(chan struct{})
	defer func() { close(updatesDone) }()
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for i := 0; ; i++ {
			select {
			case <-ticker.C:
				if i%2 == 0 {
					ring.SetAddrs(map[string]string{
						ringShard1Name: ":" + ringShard1Port,
					})
				} else {
					ring.SetAddrs(map[string]string{
						ringShard1Name: ":" + ringShard1Port,
						ringShard2Name: ":" + ringShard2Port,
					})
				}
			case <-updatesDone:
				return
			}
		}
	}()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := ring.Ping(context.Background()).Result(); err != nil {
			if err == redis.ErrClosed {
				// The shard client could be closed while ping command is in progress
				continue
			} else {
				b.Fatal(err)
			}
		}
	}
}