diff --git a/src/bin/pg_basebackup/bbstreamer_lz4.c b/src/bin/pg_basebackup/bbstreamer_lz4.c
index 395fcb92795..782f5581559 100644
--- a/src/bin/pg_basebackup/bbstreamer_lz4.c
+++ b/src/bin/pg_basebackup/bbstreamer_lz4.c
@@ -320,9 +320,9 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
 
 	mystreamer = (bbstreamer_lz4_frame *) streamer;
 	next_in = (uint8 *) data;
-	next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+	next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
 	avail_in = len;
-	avail_out = mystreamer->base.bbs_buffer.maxlen;
+	avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
 
 	while (avail_in > 0)
 	{
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 1a783d11883..5bd2e303d2a 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,6 +16,22 @@
 my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 
+# Create file with some random data and an arbitrary size, useful to check
+# the solidity of the compression and decompression logic.  The size of the
+# file is chosen to be around 640kB.  This has proven to be large enough to
+# detect some issues related to LZ4, and low enough to not impact the runtime
+# of the test significantly.
+my $junk_data = $primary->safe_psql(
+	'postgres', qq(
+		SELECT string_agg(encode(sha256(i::text::bytea), 'hex'), '')
+		FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+  or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
 my $backup_path = $primary->backup_dir . '/server-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -42,6 +58,14 @@ my @test_configuration = (
 		'decompress_flags' => [ '-d', '-m' ],
 		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
+	{
+		'compression_method' => 'lz4',
+		'backup_flags' => [ '--compress', 'server-lz4:5' ],
+		'backup_archive' => 'base.tar.lz4',
+		'decompress_program' => $ENV{'LZ4'},
+		'decompress_flags' => [ '-d', '-m' ],
+		'enabled' => check_pg_config("#define USE_LZ4 1")
+	},
 	{
 		'compression_method' => 'zstd',
 		'backup_flags' => [ '--compress', 'server-zstd' ],
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index 44d83e777ff..6050da2bf9d 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,6 +15,22 @@
 my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 
+# Create file with some random data and an arbitrary size, useful to check
+# the solidity of the compression and decompression logic.  The size of the
+# file is chosen to be around 640kB.  This has proven to be large enough to
+# detect some issues related to LZ4, and low enough to not impact the runtime
+# of the test significantly.
+my $junk_data = $primary->safe_psql(
+	'postgres', qq(
+		SELECT string_agg(encode(sha256(i::text::bytea), 'hex'), '')
+		FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+  or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
 my $backup_path = $primary->backup_dir . '/client-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -42,6 +58,15 @@ my @test_configuration = (
 		'output_file' => 'base.tar',
 		'enabled' => check_pg_config("#define USE_LZ4 1")
 	},
+	{
+		'compression_method' => 'lz4',
+		'backup_flags' => [ '--compress', 'client-lz4:1' ],
+		'backup_archive' => 'base.tar.lz4',
+		'decompress_program' => $ENV{'LZ4'},
+		'decompress_flags' => ['-d'],
+		'output_file' => 'base.tar',
+		'enabled' => check_pg_config("#define USE_LZ4 1")
+	},
 	{
 		'compression_method' => 'zstd',
 		'backup_flags' => [ '--compress', 'client-zstd:5' ],
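
The change to bbstreamer_lz4_decompressor_content() fixes the handling of a
partially filled output buffer: the old code reset next_out and avail_out to
the start of bbs_buffer on every call, clobbering any decompressed bytes a
previous call had left there (tracked by bytes_written).  Below is a minimal
standalone sketch, not PostgreSQL's bbstreamer code, of the same streaming
pattern using the public lz4frame API; the names decomp_state, feed_chunk,
flush_output, and the 64kB buffer size are all hypothetical.

#include <stdio.h>
#include <string.h>
#include <lz4frame.h>

#define OUT_BUFSZ 65536

typedef struct decomp_state
{
	LZ4F_dctx  *dctx;
	char		out[OUT_BUFSZ];
	size_t		bytes_written;	/* valid bytes already sitting in out[] */
} decomp_state;

/* Hypothetical consumer of decompressed data; stdout stands in for it. */
static void
flush_output(decomp_state *ds)
{
	fwrite(ds->out, 1, ds->bytes_written, stdout);
	ds->bytes_written = 0;
}

/*
 * Feed one chunk of compressed input, of arbitrary size.  The essential
 * point, mirroring the fix above: the output pointer and the remaining
 * capacity must both be offset by bytes_written, because an earlier call
 * may have left the buffer partly full without flushing it.
 */
static int
feed_chunk(decomp_state *ds, const char *data, size_t len)
{
	const char *next_in = data;
	size_t		avail_in = len;

	while (avail_in > 0)
	{
		size_t		dst_size = OUT_BUFSZ - ds->bytes_written;
		size_t		src_size = avail_in;
		size_t		ret;

		ret = LZ4F_decompress(ds->dctx,
							  ds->out + ds->bytes_written, &dst_size,
							  next_in, &src_size, NULL);
		if (LZ4F_isError(ret))
		{
			fprintf(stderr, "lz4: %s\n", LZ4F_getErrorName(ret));
			return -1;
		}

		/* LZ4F_decompress reports how much it consumed and produced. */
		next_in += src_size;
		avail_in -= src_size;
		ds->bytes_written += dst_size;

		/* Hand the buffer off only once it is completely full. */
		if (ds->bytes_written >= OUT_BUFSZ)
			flush_output(ds);
	}
	return 0;
}

int
main(void)
{
	decomp_state ds;
	char		buf[4096];
	size_t		n;

	memset(&ds, 0, sizeof(ds));
	if (LZ4F_isError(LZ4F_createDecompressionContext(&ds.dctx, LZ4F_VERSION)))
		return 1;

	/* Read an .lz4 frame from stdin in small chunks, as a stream would. */
	while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
		if (feed_chunk(&ds, buf, n) < 0)
			return 1;

	flush_output(&ds);			/* drain whatever remains */
	LZ4F_freeDecompressionContext(ds.dctx);
	return 0;
}

The bug only shows up when a single input chunk does not fill the output
buffer, which is why the tests grow a junk file of a deliberately awkward
size.  The "around 640kB" figure in the test comments follows from the
query: each sha256 digest is 32 bytes, or 64 characters once hex-encoded,
so 10240 rows yield 10240 * 64 = 655,360 bytes, exactly 640kB.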