
Use SASLprep to normalize passwords for SCRAM authentication.

An important step of SASLprep normalization is to convert the string to
Unicode normalization form NFKC. Unicode normalization requires a fairly
large table of character decompositions, which is generated from data
published by the Unicode consortium. The script to generate the table is
put in src/common/unicode, along with test code for the normalization.
A pre-generated version of the tables is included in src/include/common,
so the code in src/common/unicode is not needed to build PostgreSQL, only
if you wish to modify the normalization tables.
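As a rough sketch of what the normalization step looks like from a
caller's perspective (the unicode_normalize_kc() name and its
pg_wchar-based signature are taken from the norm_test.c code further
down; the snippet itself is illustrative, not part of the patch):

    #include "postgres_fe.h"
    #include "common/unicode_norm.h"

    /*
     * Minimal sketch: normalize a decomposed string to NFKC.
     * U+0041 U+030A ("A" plus combining ring above) composes to
     * U+00C5 (LATIN CAPITAL LETTER A WITH RING ABOVE) under NFKC.
     */
    static void
    normalize_example(void)
    {
        pg_wchar    input[] = {0x0041, 0x030A, 0};  /* zero-terminated */
        pg_wchar   *output = unicode_normalize_kc(input);

        if (output == NULL)
            return;     /* defensive: assume NULL on out-of-memory */
        /* here output[0] == 0x00C5 and output[1] == 0 */
    }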

The SASLprep implementation depends on the UTF-8 functions from
src/backend/utils/mb/wchar.c, so to use it, you must also compile and
link that file. That doesn't change anything for the current users of
these functions, the backend and libpq, as they both already link with
wchar.o. It would be good to move those functions into a separate file in
src/common, but I'll leave that for another day.
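To make the dependency concrete: SASLprep has to walk UTF-8 strings
codepoint by codepoint, along the lines of this sketch built on the
wchar.c helpers pg_utf_mblen() and utf8_to_unicode() (error handling
omitted; assumes the input was already validated as UTF-8):

    #include "postgres_fe.h"
    #include "mb/pg_wchar.h"

    /*
     * Sketch: decode a NUL-terminated, already-validated UTF-8 string
     * into an array of Unicode codepoints.
     */
    static void
    decode_utf8(const char *str, pg_wchar *out)
    {
        const unsigned char *p = (const unsigned char *) str;

        while (*p)
        {
            *out++ = utf8_to_unicode(p);    /* codepoint of this character */
            p += pg_utf_mblen(p);           /* advance by its byte length */
        }
        *out = 0;       /* zero-terminate, like the test code below */
    }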

No documentation changes are included, because there are no details on
the SCRAM mechanism in the docs anyway. An overview of that in the
protocol specification would probably be good, even though SCRAM is
documented in detail in RFC 5802. I'll write that as a separate patch. An
important thing to mention there is that we apply SASLprep even to
invalid UTF-8 strings, to support other encodings.
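For reference, a sketch of the calling convention this implies. The
pg_saslprep() entry point and its result codes live in this commit's
src/common/saslprep.c, which is not shown in this excerpt, so treat the
exact names here as an assumption:

    #include "postgres_fe.h"
    #include <string.h>
    #include "common/saslprep.h"    /* assumed header for pg_saslprep() */

    /*
     * Sketch: normalize a password with SASLprep. If the input is not
     * valid UTF-8, fall back to the raw password, so that passwords in
     * other encodings keep working.
     */
    static char *
    prepare_password(const char *password)
    {
        char       *prep;

        if (pg_saslprep(password, &prep) == SASLPREP_SUCCESS)
            return prep;            /* use the normalized form */
        return strdup(password);    /* e.g. on invalid UTF-8 input */
    }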

Patch by Michael Paquier and me.

Discussion: https://www.postgresql.org/message-id/CAB7nPqSByyEmAVLtEf1KxTRh=PWNKiWKEKQR=e1yGehz=wbymQ@mail.gmail.com
Author: Heikki Linnakangas
Date:   2017-04-07 14:56:05 +03:00
Parent: 32e33a7979
Commit: 60f11b87a2

19 changed files with 11322 additions and 32 deletions

src/common/unicode/.gitignore (new file, 7 lines)

@@ -0,0 +1,7 @@
/norm_test
/norm_test_table.h

# Files downloaded from the Unicode Character Database
/CompositionExclusions.txt
/NormalizationTest.txt
/UnicodeData.txt

src/common/unicode/Makefile (new file, 53 lines)

@@ -0,0 +1,53 @@
#-------------------------------------------------------------------------
#
# Makefile
#     Makefile for src/common/unicode
#
# IDENTIFICATION
#     src/common/unicode/Makefile
#
#-------------------------------------------------------------------------

subdir = src/common/unicode
top_builddir = ../../..
include $(top_builddir)/src/Makefile.global

override CPPFLAGS := -DFRONTEND $(CPPFLAGS)
LIBS += $(PTHREAD_LIBS)

# By default, do nothing.
all:

DOWNLOAD = wget -O $@ --no-use-server-timestamps

# These files are part of the Unicode Character Database. Download
# them on demand.
UnicodeData.txt CompositionExclusions.txt NormalizationTest.txt:
	$(DOWNLOAD) http://unicode.org/Public/UNIDATA/$(@F)

# Generation of conversion tables used for string normalization with
# UTF-8 strings.
unicode_norm_table.h: generate-unicode_norm_table.pl UnicodeData.txt CompositionExclusions.txt
	$(PERL) generate-unicode_norm_table.pl

# Test suite
normalization-check: norm_test
	./norm_test

norm_test: norm_test.o ../unicode_norm.o

norm_test.o: norm_test_table.h

norm_test_table.h: generate-norm_test_table.pl NormalizationTest.txt
	perl generate-norm_test_table.pl NormalizationTest.txt $@

.PHONY: normalization-check

clean:
	rm -f $(OBJS) norm_test norm_test.o
distclean: clean
	rm -f UnicodeData.txt CompositionExclusions.txt NormalizationTest.txt norm_test_table.h unicode_norm_table.h
maintainer-clean: distclean

src/common/unicode/README (new file, 35 lines)

@@ -0,0 +1,35 @@
This directory contains tools to generate the tables in
src/include/common/unicode_norm.h, used for Unicode normalization. The
generated .h file is included in the source tree, so these are normally not
needed to build PostgreSQL, only if you need to re-generate the .h file
from the Unicode data files for some reason, e.g. to update to a new
version of Unicode.

Generating unicode_norm_table.h
-------------------------------

1. Download the Unicode data file, UnicodeData.txt, from the Unicode
consortium and place it in the current directory. Then run the perl script
"generate-unicode_norm_table.pl" to process it and generate the
"unicode_norm_table.h" file. The Makefile contains a rule to download the
data files if they don't exist.

    make unicode_norm_table.h

2. Inspect the resulting header file. Once you're happy with it, copy it to
the right location:

    cp unicode_norm_table.h ../../../src/include/common/

Tests
-----

The Unicode consortium publishes a comprehensive test suite for the
normalization algorithm, in a file called NormalizationTest.txt. This
directory also contains a perl script and some C code to run our
normalization code against all the test strings in NormalizationTest.txt.
To download NormalizationTest.txt and run the tests:

    make normalization-check

src/common/unicode/generate-norm_test_table.pl (new file, 102 lines)

@@ -0,0 +1,102 @@
#!/usr/bin/perl
#
# Read the Unicode consortium's normalization test suite,
# NormalizationTest.txt, and generate a C array from it, for norm_test.c.
#
# NormalizationTest.txt is part of the Unicode Character Database.
#
# Copyright (c) 2000-2017, PostgreSQL Global Development Group

use strict;
use warnings;

use File::Basename;

die "Usage: $0 INPUT_FILE OUTPUT_FILE\n" if @ARGV != 2;
my $input_file = $ARGV[0];
my $output_file = $ARGV[1];
my $output_base = basename($output_file);

# Open the input and output files
open my $INPUT, $input_file
    or die "Could not open input file $input_file: $!";
open my $OUTPUT, "> $output_file"
    or die "Could not open output file $output_file: $!\n";

# Print header of output file.
print $OUTPUT <<HEADER;
/*-------------------------------------------------------------------------
 *
 * norm_test_table.h
 *    Test strings for Unicode normalization.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/common/unicode/norm_test_table.h
 *
 *-------------------------------------------------------------------------
 */

/*
 * File auto-generated by src/common/unicode/generate-norm_test_table.pl, do
 * not edit. There is deliberately not an #ifndef PG_NORM_TEST_TABLE_H
 * here.
 */
typedef struct
{
    int         linenum;
    pg_wchar    input[50];
    pg_wchar    output[50];
} pg_unicode_test;

/* test table */
HEADER
print $OUTPUT
    "static const pg_unicode_test UnicodeNormalizationTests[] =\n{\n";

# Helper routine to convert a space-separated list of Unicode characters to
# hexadecimal list format, suitable for outputting in a C array.
sub codepoint_string_to_hex
{
    my $codepoint_string = shift;

    my $result;

    foreach (split(' ', $codepoint_string))
    {
        my $cp = $_;
        my $utf8 = "0x$cp, ";
        $result .= $utf8;
    }
    $result .= '0';    # null-terminate the array
    return $result;
}

# Process the input file line by line
my $linenum = 0;
while (my $line = <$INPUT>)
{
    $linenum = $linenum + 1;
    if ($line =~ /^\s*#/) { next; }    # ignore comments
    if ($line =~ /^@/) { next; }       # ignore @Part0-like headers

    # Split the line and get the fields we need:
    #
    # source; NFC; NFD; NFKC; NFKD
    my ($source, $nfc, $nfd, $nfkc, $nfkd) = split(';', $line);

    my $source_utf8 = codepoint_string_to_hex($source);
    my $nfkc_utf8 = codepoint_string_to_hex($nfkc);

    print $OUTPUT "\t{ $linenum, { $source_utf8 }, { $nfkc_utf8 } },\n";
}

# Output terminator entry
print $OUTPUT "\t{ 0, { 0 }, { 0 } }";
print $OUTPUT "\n};\n";

close $OUTPUT;
close $INPUT;

src/common/unicode/generate-unicode_norm_table.pl (new file, 226 lines)

@@ -0,0 +1,226 @@
#!/usr/bin/perl
#
# Generate a composition table, using Unicode data files as input
#
# Input: UnicodeData.txt and CompositionExclusions.txt
# Output: unicode_norm_table.h
#
# Copyright (c) 2000-2017, PostgreSQL Global Development Group

use strict;
use warnings;

my $output_file = "unicode_norm_table.h";

my $FH;

# Read the list of codes that should be excluded from re-composition.
my @composition_exclusion_codes = ();
open($FH, "CompositionExclusions.txt")
    or die "Could not open CompositionExclusions.txt: $!.";
while (my $line = <$FH>)
{
    if ($line =~ /^([[:xdigit:]]+)/)
    {
        push @composition_exclusion_codes, $1;
    }
}
close $FH;

# Read entries from UnicodeData.txt into a list, and a hash table. We need
# three fields from each row: the codepoint, the canonical combining class,
# and the character decomposition mapping.
my @characters = ();
my %character_hash = ();
open($FH, "UnicodeData.txt") or die "Could not open UnicodeData.txt: $!.";
while (my $line = <$FH>)
{
    # Split the line and get the fields we need:
    # - Unicode code value
    # - Canonical Combining Class
    # - Character Decomposition Mapping
    my @elts = split(';', $line);
    my $code = $elts[0];
    my $class = $elts[3];
    my $decomp = $elts[5];

    # Skip codepoints above U+10FFFF. They cannot be represented in 4 bytes
    # in UTF-8, and PostgreSQL doesn't support UTF-8 characters longer than
    # 4 bytes. (This is just pro forma, as there aren't any such entries in
    # the data file, currently.)
    next if hex($code) > 0x10FFFF;

    # Skip characters with no decomposition and a class of 0, to reduce the
    # table size.
    next if $class eq '0' && $decomp eq '';

    my %char_entry = (code => $code, class => $class, decomp => $decomp);
    push(@characters, \%char_entry);
    $character_hash{$code} = \%char_entry;
}
close $FH;

my $num_characters = scalar @characters;

# Start writing out the output file
open my $OUTPUT, "> $output_file"
    or die "Could not open output file $output_file: $!\n";

print $OUTPUT <<HEADER;
/*-------------------------------------------------------------------------
 *
 * unicode_norm_table.h
 *    Composition table used for Unicode normalization
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/common/unicode_norm_table.h
 *
 *-------------------------------------------------------------------------
 */

/*
 * File auto-generated by src/common/unicode/generate-unicode_norm_table.pl,
 * do not edit. There is deliberately not an #ifndef PG_UNICODE_NORM_TABLE_H
 * here.
 */
typedef struct
{
    uint32      codepoint;      /* Unicode codepoint */
    uint8       class;          /* combining class of character */
    uint8       dec_size_flags; /* size and flags of decomposition code list */
    uint16      dec_index;      /* index into UnicodeDecomp_codepoints, or the
                                 * decomposition itself if DECOMP_INLINE */
} pg_unicode_decomposition;

#define DECOMP_NO_COMPOSE   0x80    /* don't use for re-composition */
#define DECOMP_INLINE       0x40    /* decomposition is stored inline in dec_index */

#define DECOMPOSITION_SIZE(x) ((x)->dec_size_flags & 0x3F)
#define DECOMPOSITION_NO_COMPOSE(x) (((x)->dec_size_flags & DECOMP_NO_COMPOSE) != 0)
#define DECOMPOSITION_IS_INLINE(x) (((x)->dec_size_flags & DECOMP_INLINE) != 0)

/* Table of Unicode codepoints and their decompositions */
static const pg_unicode_decomposition UnicodeDecompMain[$num_characters] =
{
HEADER

my $decomp_index = 0;
my $decomp_string = "";

my $last_code = $characters[-1]->{code};
foreach my $char (@characters)
{
    my $code = $char->{code};
    my $class = $char->{class};
    my $decomp = $char->{decomp};

    # The character decomposition mapping field in UnicodeData.txt is a list
    # of unicode codepoints, separated by space. But it can be prefixed with
    # a so-called compatibility formatting tag, like "<compat>" or "<font>".
    # Entries with compatibility formatting tags should not be used for
    # re-composing characters during normalization, so flag them in the
    # table. (The tag itself doesn't matter, only whether there is a tag
    # or not.)
    my $compat = 0;
    if ($decomp =~ /\<.*\>/)
    {
        $compat = 1;
        $decomp =~ s/\<[^][]*\>//g;
    }
    my @decomp_elts = split(" ", $decomp);

    # Size of the decomposition.
    my $decomp_size = scalar(@decomp_elts);

    my $first_decomp = shift @decomp_elts;

    my $flags = "";
    my $comment = "";

    if ($decomp_size == 2)
    {
        # Should this be used for recomposition?
        if ($compat)
        {
            $flags .= " | DECOMP_NO_COMPOSE";
            $comment = "compatibility mapping";
        }
        elsif ($character_hash{$first_decomp}
            && $character_hash{$first_decomp}->{class} != 0)
        {
            $flags .= " | DECOMP_NO_COMPOSE";
            $comment = "non-starter decomposition";
        }
        else
        {
            foreach my $lcode (@composition_exclusion_codes)
            {
                if ($lcode eq $char->{code})
                {
                    $flags .= " | DECOMP_NO_COMPOSE";
                    $comment = "in exclusion list";
                    last;
                }
            }
        }
    }

    if ($decomp_size == 0)
    {
        print $OUTPUT "\t{0x$code, $class, 0$flags, 0}";
    }
    elsif ($decomp_size == 1 && length($first_decomp) <= 4)
    {
        # The decomposition consists of a single codepoint, and it fits
        # in a uint16, so we can store it "inline" in the main table.
        $flags .= " | DECOMP_INLINE";
        print $OUTPUT "\t{0x$code, $class, 1$flags, 0x$first_decomp}";
    }
    else
    {
        print $OUTPUT
            "\t{0x$code, $class, $decomp_size$flags, $decomp_index}";

        # Now save the decompositions into a dedicated area that will
        # be written afterwards. First build the entry dedicated to
        # a sub-table with the code and decomposition.
        $decomp_string .= ",\n" if ($decomp_string ne "");

        $decomp_string .= "\t /* $decomp_index */ 0x$first_decomp";
        foreach (@decomp_elts)
        {
            $decomp_string .= ", 0x$_";
        }

        $decomp_index = $decomp_index + $decomp_size;
    }

    # Print a comma after all items except the last one.
    print $OUTPUT "," unless ($code eq $last_code);
    if ($comment ne "")
    {
        # If the line is wide already, indent the comment with one tab,
        # otherwise with two. This is to make the output match the way
        # pgindent would mangle it. (This is quite hacky. To do this
        # properly, we should actually track how long the line is so far,
        # but this works for now.)
        print $OUTPUT "\t" if ($decomp_index < 10);
        print $OUTPUT "\t/* $comment */";
    }
    print $OUTPUT "\n";
}
print $OUTPUT "\n};\n\n";

# Print the array of decomposed codes.
print $OUTPUT <<HEADER;
/* codepoints array */
static const uint32 UnicodeDecomp_codepoints[$decomp_index] =
{
$decomp_string
};
HEADER

close $OUTPUT;
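To make the generated layout concrete, here is a simplified sketch (not
the actual unicode_norm.c code) of how a consumer can look up a
codepoint's decomposition using the arrays and macros emitted above; it
assumes the generated unicode_norm_table.h, with its uint32/uint16 types,
has been included:

    #include <stdlib.h>

    /* bsearch() comparator: key is a codepoint, entry is a table row */
    static int
    codepoint_cmp(const void *key, const void *entry)
    {
        const uint32 cp = *(const uint32 *) key;
        const pg_unicode_decomposition *e = entry;

        return (cp > e->codepoint) - (cp < e->codepoint);
    }

    /* Return a codepoint's decomposition and its length, or NULL. */
    static const uint32 *
    get_decomposition(uint32 cp, int *size)
    {
        static uint32 inline_buf;   /* not thread-safe; fine for a sketch */
        const pg_unicode_decomposition *entry;

        entry = bsearch(&cp, UnicodeDecompMain,
                        sizeof(UnicodeDecompMain) / sizeof(UnicodeDecompMain[0]),
                        sizeof(pg_unicode_decomposition), codepoint_cmp);
        if (entry == NULL)
            return NULL;            /* no decomposition recorded */

        *size = DECOMPOSITION_SIZE(entry);
        if (DECOMPOSITION_IS_INLINE(entry))
        {
            /* a single codepoint is stored directly in dec_index */
            inline_buf = entry->dec_index;
            return &inline_buf;
        }
        /* otherwise dec_index points into the shared codepoints array */
        return &UnicodeDecomp_codepoints[entry->dec_index];
    }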

src/common/unicode/norm_test.c (new file, 80 lines)

@@ -0,0 +1,80 @@
/*-------------------------------------------------------------------------
 * norm_test.c
 *    Program to test Unicode normalization functions.
 *
 * Portions Copyright (c) 2017, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *    src/common/unicode/norm_test.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres_fe.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "common/unicode_norm.h"

#include "norm_test_table.h"

/* Print a string of pg_wchars as a sequence of 4-digit hex codepoints. */
static char *
print_wchar_str(const pg_wchar *s)
{
#define BUF_DIGITS 50
	/*
	 * Four hex digits per codepoint, plus the terminating NUL. (The
	 * original sizing of two bytes per codepoint truncated the %04X
	 * output.)
	 */
	static char buf[BUF_DIGITS * 4 + 1];
	int			i;

	i = 0;
	while (*s && i < BUF_DIGITS)
	{
		snprintf(&buf[i * 4], 5, "%04X", *s);
		i++;
		s++;
	}
	buf[i * 4] = '\0';

	return buf;
}

static int
pg_wcscmp(const pg_wchar *s1, const pg_wchar *s2)
{
	for (;;)
	{
		if (*s1 < *s2)
			return -1;
		if (*s1 > *s2)
			return 1;
		if (*s1 == 0)
			return 0;
		s1++;
		s2++;
	}
}

int
main(int argc, char **argv)
{
	const pg_unicode_test *test;

	for (test = UnicodeNormalizationTests; test->input[0] != 0; test++)
	{
		pg_wchar   *result;

		result = unicode_normalize_kc(test->input);

		if (pg_wcscmp(test->output, result) != 0)
		{
			printf("FAILURE (NormalizationTest.txt line %d):\n", test->linenum);
			printf("input:\t\t%s\n", print_wchar_str(test->input));
			printf("expected:\t%s\n", print_wchar_str(test->output));
			printf("got:\t\t%s\n", print_wchar_str(result));
			printf("\n");
			exit(1);
		}
	}
	printf("All tests successful!\n");
	exit(0);
}