
Replace the data structure used for keyword lookup.

Previously, ScanKeywordLookup was passed an array of string pointers.
This had some performance deficiencies: the strings themselves might
be scattered all over the place depending on the compiler (and some
quick checking shows that at least with gcc-on-Linux, they indeed
weren't reliably close together).  That led to very cache-unfriendly
behavior as the binary search touched strings in many different pages.
Also, depending on the platform, the string pointers might need to
be adjusted at program start, so that they couldn't be simple constant
data.  And the ScanKeyword struct had been designed with an eye to
32-bit machines originally; on 64-bit it requires 16 bytes per
keyword, making it even more cache-unfriendly.
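
For context, the old per-keyword struct (from common/keywords.h before this
patch) was essentially the following; on 64-bit machines the 8-byte pointer
plus two int16 fields pads out to 16 bytes per entry:

typedef struct ScanKeyword
{
	const char *name;			/* keyword name, in lower case */
	int16		value;			/* grammar's token code */
	int16		category;		/* keyword category */
} ScanKeyword;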

Redesign so that the keyword strings themselves are allocated
consecutively (as part of one big char-string constant), thereby
eliminating the touch-lots-of-unrelated-pages syndrome.  And get
rid of the ScanKeyword array in favor of three separate arrays:
uint16 offsets into the keyword array, uint16 token codes, and
uint8 keyword categories.  That reduces the overhead per keyword
to 5 bytes instead of 16 (even less in programs that only need
one of the token codes and categories); moreover, the binary search
only touches the offsets array, further reducing its cache footprint.
This also lets us put the token codes somewhere else than the
keyword strings are, which avoids some unpleasant build dependencies.
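
To make the layout concrete, here is an illustrative hand-written
three-keyword example of the new representation (not actual generator
output; uses PostgreSQL's uint16/uint8 typedefs from c.h, and token names
standing in for grammar token codes):

/* All keyword strings packed into one constant, NUL-separated. */
static const char kw_string[] =
	"abort\0"
	"access\0"
	"zone";

/* Three parallel arrays: 2 + 2 + 1 = 5 bytes per keyword. */
static const uint16 kw_offsets[] = {0, 6, 13};	/* start of each keyword */
static const uint16 kw_tokens[] = {ABORT_P, ACCESS, ZONE};
static const uint8 kw_categories[] =
	{UNRESERVED_KEYWORD, UNRESERVED_KEYWORD, UNRESERVED_KEYWORD};

The binary search compares only kw_string + kw_offsets[i] against the probe
word, so kw_tokens and kw_categories stay out of the search's cache
footprint entirely.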

While we're at it, wrap the data used by ScanKeywordLookup into
a struct that can be treated as an opaque type by most callers.
That doesn't change things much right now, but it will make it
less painful to switch to a hash-based lookup method, as is being
discussed in the mailing list thread.
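
Concretely, the wrapper is a small struct along these lines (a sketch; the
field names match what the new kwlookup.c, shown further down, dereferences):

typedef struct ScanKeywordList
{
	const char *kw_string;		/* all keywords, consecutive, NUL-separated */
	const uint16 *kw_offsets;	/* offsets into kw_string, in sort order */
	int			num_keywords;	/* number of keywords */
	int			max_kw_len;		/* length of the longest keyword */
} ScanKeywordList;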

Most of the change here is associated with adding a generator
script that can build the new data structure from the same
list-of-PG_KEYWORD header representation we used before.
The PG_KEYWORD lists that plpgsql and ecpg used to embed in
their scanner .c files have to be moved into headers, and the
Makefiles have to be taught to invoke the generator script.
This work is also necessary if we're to consider hash-based lookup,
since the generator script is what would be responsible for
constructing a hash table.
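
For reference, a list-of-PG_KEYWORD header is just a column-oriented macro
list; these are actual entries from src/include/parser/kwlist.h:

/* name, grammar token, category */
PG_KEYWORD("abort", ABORT_P, UNRESERVED_KEYWORD)
PG_KEYWORD("absolute", ABSOLUTE_P, UNRESERVED_KEYWORD)
PG_KEYWORD("access", ACCESS, UNRESERVED_KEYWORD)

Each consumer defines PG_KEYWORD to pick out the column it needs, as the
new keywords.c below does for the category column; the generator script
reads the same header to emit the packed string and offsets array.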

Aside from saving a few kilobytes in each program that includes
the keyword table, this seems to speed up raw parsing (flex+bison)
by a few percent.  So it's worth doing even as it stands, though
we think we can gain even more with a follow-on patch to switch
to hash-based lookup.

John Naylor, with further hacking by me

Discussion: https://postgr.es/m/CAJVSVGXdFVU2sgym89XPL=Lv1zOS5=EHHQ8XWNzFL=mTXkKMLw@mail.gmail.com
Author: Tom Lane
Date: 2019-01-06 17:02:57 -05:00
parent c5c7fa261f
commit afb0d0712f

32 changed files with 845 additions and 441 deletions

src/common/.gitignore (new file)

@ -0,0 +1 @@
+/kwlist_d.h

src/common/Makefile

@ -41,11 +41,11 @@ override CPPFLAGS += -DVAL_LDFLAGS_EX="\"$(LDFLAGS_EX)\""
 override CPPFLAGS += -DVAL_LDFLAGS_SL="\"$(LDFLAGS_SL)\""
 override CPPFLAGS += -DVAL_LIBS="\"$(LIBS)\""
-override CPPFLAGS := -DFRONTEND $(CPPFLAGS)
+override CPPFLAGS := -DFRONTEND -I. -I$(top_srcdir)/src/common $(CPPFLAGS)
 LIBS += $(PTHREAD_LIBS)
 
 OBJS_COMMON = base64.o config_info.o controldata_utils.o exec.o file_perm.o \
-	ip.o keywords.o link-canary.o md5.o pg_lzcompress.o \
+	ip.o keywords.o kwlookup.o link-canary.o md5.o pg_lzcompress.o \
 	pgfnames.o psprintf.o relpath.o \
 	rmtree.o saslprep.o scram-common.o string.o unicode_norm.o \
 	username.o wait_error.o
@ -65,6 +65,8 @@ OBJS_SRV = $(OBJS_COMMON:%.o=%_srv.o)
 all: libpgcommon.a libpgcommon_shlib.a libpgcommon_srv.a
 
+distprep: kwlist_d.h
+
 # libpgcommon is needed by some contrib
 install: all installdirs
 	$(INSTALL_STLIB) libpgcommon.a '$(DESTDIR)$(libdir)/libpgcommon.a'
@ -115,16 +117,18 @@ libpgcommon_srv.a: $(OBJS_SRV)
 %_srv.o: %.c %.o
 	$(CC) $(CFLAGS) $(subst -DFRONTEND,, $(CPPFLAGS)) -c $< -o $@
 
-# Dependencies of keywords.o need to be managed explicitly to make sure
+# generate SQL keyword lookup table to be included into keywords*.o.
+kwlist_d.h: $(top_srcdir)/src/include/parser/kwlist.h $(top_srcdir)/src/tools/gen_keywordlist.pl
+	$(PERL) $(top_srcdir)/src/tools/gen_keywordlist.pl --extern $<
+
+# Dependencies of keywords*.o need to be managed explicitly to make sure
 # that you don't get broken parsing code, even in a non-enable-depend build.
-# Note that gram.h isn't required for the frontend versions of keywords.o.
-$(top_builddir)/src/include/parser/gram.h: $(top_srcdir)/src/backend/parser/gram.y
-	$(MAKE) -C $(top_builddir)/src/backend $(top_builddir)/src/include/parser/gram.h
-
-keywords.o: $(top_srcdir)/src/include/parser/kwlist.h
-keywords_shlib.o: $(top_srcdir)/src/include/parser/kwlist.h
-keywords_srv.o: $(top_builddir)/src/include/parser/gram.h $(top_srcdir)/src/include/parser/kwlist.h
+keywords.o keywords_shlib.o keywords_srv.o: kwlist_d.h
 
-clean distclean maintainer-clean:
+# kwlist_d.h is in the distribution tarball, so it is not cleaned here.
+clean distclean:
 	rm -f libpgcommon.a libpgcommon_shlib.a libpgcommon_srv.a
 	rm -f $(OBJS_FRONTEND) $(OBJS_SHLIB) $(OBJS_SRV)
+
+maintainer-clean: distclean
+	rm -f kwlist_d.h

src/common/keywords.c

@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * keywords.c
- *	  lexical token lookup for key words in PostgreSQL
+ *	  PostgreSQL's list of SQL keywords
  *
  *
  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
@ -13,102 +13,21 @@
  *
  *-------------------------------------------------------------------------
  */
-#ifndef FRONTEND
-#include "postgres.h"
-#else
-#include "postgres_fe.h"
-#endif
-
-#ifndef FRONTEND
-
-#include "parser/gramparse.h"
-
-#define PG_KEYWORD(a,b,c) {a,b,c},
-
-#else
-
+#include "c.h"
+
 #include "common/keywords.h"
 
-/*
- * We don't need the token number for frontend uses, so leave it out to avoid
- * requiring backend headers that won't compile cleanly here.
- */
-#define PG_KEYWORD(a,b,c) {a,0,c},
-
-#endif							/* FRONTEND */
+
+/* ScanKeywordList lookup data for SQL keywords */
+#include "kwlist_d.h"
 
-const ScanKeyword ScanKeywords[] = {
+/* Keyword categories for SQL keywords */
+#define PG_KEYWORD(kwname, value, category) category,
+
+const uint8 ScanKeywordCategories[SCANKEYWORDS_NUM_KEYWORDS] = {
 #include "parser/kwlist.h"
 };
 
-const int	NumScanKeywords = lengthof(ScanKeywords);
-
-
-/*
- * ScanKeywordLookup - see if a given word is a keyword
- *
- * The table to be searched is passed explicitly, so that this can be used
- * to search keyword lists other than the standard list appearing above.
- *
- * Returns a pointer to the ScanKeyword table entry, or NULL if no match.
- *
- * The match is done case-insensitively.  Note that we deliberately use a
- * dumbed-down case conversion that will only translate 'A'-'Z' into 'a'-'z',
- * even if we are in a locale where tolower() would produce more or different
- * translations.  This is to conform to the SQL99 spec, which says that
- * keywords are to be matched in this way even though non-keyword identifiers
- * receive a different case-normalization mapping.
- */
-const ScanKeyword *
-ScanKeywordLookup(const char *text,
-				  const ScanKeyword *keywords,
-				  int num_keywords)
-{
-	int			len,
-				i;
-	char		word[NAMEDATALEN];
-	const ScanKeyword *low;
-	const ScanKeyword *high;
-
-	len = strlen(text);
-	/* We assume all keywords are shorter than NAMEDATALEN. */
-	if (len >= NAMEDATALEN)
-		return NULL;
-
-	/*
-	 * Apply an ASCII-only downcasing.  We must not use tolower() since it may
-	 * produce the wrong translation in some locales (eg, Turkish).
-	 */
-	for (i = 0; i < len; i++)
-	{
-		char		ch = text[i];
-
-		if (ch >= 'A' && ch <= 'Z')
-			ch += 'a' - 'A';
-		word[i] = ch;
-	}
-	word[len] = '\0';
-
-	/*
-	 * Now do a binary search using plain strcmp() comparison.
-	 */
-	low = keywords;
-	high = keywords + (num_keywords - 1);
-	while (low <= high)
-	{
-		const ScanKeyword *middle;
-		int			difference;
-
-		middle = low + (high - low) / 2;
-		difference = strcmp(middle->name, word);
-		if (difference == 0)
-			return middle;
-		else if (difference < 0)
-			low = middle + 1;
-		else
-			high = middle - 1;
-	}
-
-	return NULL;
-}
+
+#undef PG_KEYWORD

src/common/kwlookup.c (new file)

@ -0,0 +1,94 @@
+/*-------------------------------------------------------------------------
+ *
+ * kwlookup.c
+ *	  Key word lookup for PostgreSQL
+ *
+ *
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  src/common/kwlookup.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "c.h"
+
+#include "common/kwlookup.h"
+
+
+/*
+ * ScanKeywordLookup - see if a given word is a keyword
+ *
+ * The list of keywords to be matched against is passed as a ScanKeywordList.
+ *
+ * Returns the keyword number (0..N-1) of the keyword, or -1 if no match.
+ * Callers typically use the keyword number to index into information
+ * arrays, but that is no concern of this code.
+ *
+ * The match is done case-insensitively.  Note that we deliberately use a
+ * dumbed-down case conversion that will only translate 'A'-'Z' into 'a'-'z',
+ * even if we are in a locale where tolower() would produce more or different
+ * translations.  This is to conform to the SQL99 spec, which says that
+ * keywords are to be matched in this way even though non-keyword identifiers
+ * receive a different case-normalization mapping.
+ */
+int
+ScanKeywordLookup(const char *text,
+				  const ScanKeywordList *keywords)
+{
+	int			len,
+				i;
+	char		word[NAMEDATALEN];
+	const char *kw_string;
+	const uint16 *kw_offsets;
+	const uint16 *low;
+	const uint16 *high;
+
+	len = strlen(text);
+
+	if (len > keywords->max_kw_len)
+		return -1;				/* too long to be any keyword */
+
+	/* We assume all keywords are shorter than NAMEDATALEN. */
+	Assert(len < NAMEDATALEN);
+
+	/*
+	 * Apply an ASCII-only downcasing.  We must not use tolower() since it may
+	 * produce the wrong translation in some locales (eg, Turkish).
+	 */
+	for (i = 0; i < len; i++)
+	{
+		char		ch = text[i];
+
+		if (ch >= 'A' && ch <= 'Z')
+			ch += 'a' - 'A';
+		word[i] = ch;
+	}
+	word[len] = '\0';
+
+	/*
+	 * Now do a binary search using plain strcmp() comparison.
+	 */
+	kw_string = keywords->kw_string;
+	kw_offsets = keywords->kw_offsets;
+	low = kw_offsets;
+	high = kw_offsets + (keywords->num_keywords - 1);
+	while (low <= high)
+	{
+		const uint16 *middle;
+		int			difference;
+
+		middle = low + (high - low) / 2;
+		difference = strcmp(kw_string + *middle, word);
+		if (difference == 0)
+			return middle - kw_offsets;
+		else if (difference < 0)
+			low = middle + 1;
+		else
+			high = middle - 1;
+	}
+
+	return -1;
+}
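
A hypothetical caller sketch, showing how the keyword number indexes a
parallel token array (ScanKeywords and ScanKeywordTokens here stand for a
particular scanner's keyword list and token array; this fragment is
illustrative, not part of the patch):

	/* In a flex scanner rule: is the scanned identifier a keyword? */
	int			kwnum = ScanKeywordLookup(yytext, &ScanKeywords);

	if (kwnum >= 0)
		return ScanKeywordTokens[kwnum];	/* keyword: return its token code */
	/* otherwise fall through and treat yytext as an ordinary identifier */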