From b990232c5d5447194857632522389142fcbb841d Mon Sep 17 00:00:00 2001
From: Neil Conway
Date: Wed, 17 Nov 2004 00:18:26 +0000
Subject: [PATCH] Backpatch fix from HEAD: Prevent a backend crash when
 processing CREATE TABLE commands with more than 65K columns, or when the
 created table has more than 65K columns due to adding inherited columns
 from parent relations. Fix a similar crash when processing SELECT queries
 with more than 65K target list entries. In all three cases we would
 eventually detect the error and elog, but the check was being made too
 late.

---
 src/backend/commands/tablecmds.c | 29 ++++++++++++++++++++++++++++-
 src/backend/parser/analyze.c     | 14 +++++++++++++-
 2 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 2c3a3760f2f..7bd8cbe8ffd 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.91.2.1 2004/07/17 17:28:47 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.91.2.2 2004/11/17 00:18:23 neilc Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -488,6 +488,23 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 								 * defaults */
 	int			child_attno;
 
+	/*
+	 * Check for and reject tables with too many columns. We perform
+	 * this check relatively early for two reasons: (a) we don't run
+	 * the risk of overflowing an AttrNumber in subsequent code (b) an
+	 * O(n^2) algorithm is okay if we're processing <= 1600 columns,
+	 * but could take minutes to execute if the user attempts to
+	 * create a table with hundreds of thousands of columns.
+	 *
+	 * Note that we also need to check that we do not exceed this
+	 * figure after including columns from inherited relations.
+	 */
+	if (length(schema) > MaxHeapAttributeNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_TOO_MANY_COLUMNS),
+				 errmsg("tables can have at most %d columns",
+						MaxHeapAttributeNumber)));
+
 	/*
 	 * Check for duplicate names in the explicit list of attributes.
 	 *
@@ -796,6 +813,16 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 	}
 
 	schema = inhSchema;
+
+	/*
+	 * Check that we haven't exceeded the legal # of columns after
+	 * merging in inherited columns.
+	 */
+	if (length(schema) > MaxHeapAttributeNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_TOO_MANY_COLUMNS),
+				 errmsg("tables can have at most %d columns",
+						MaxHeapAttributeNumber)));
 }
 
 /*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index c338e0f2281..8ea47922b18 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.290.2.1 2003/11/05 22:00:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.290.2.2 2004/11/17 00:18:26 neilc Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -440,6 +440,18 @@ transformStmt(ParseState *pstate, Node *parseTree,
 	result->querySource = QSRC_ORIGINAL;
 	result->canSetTag = true;
 
+	/*
+	 * Check that we did not produce too many resnos; at the very
+	 * least we cannot allow more than 2^16, since that would exceed
+	 * the range of an AttrNumber. It seems safest to use
+	 * MaxTupleAttributeNumber.
+	 */
+	if (pstate->p_next_resno - 1 > MaxTupleAttributeNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("target lists can have at most %d entries",
+						MaxTupleAttributeNumber)));
+
 	return result;
 }
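
For context, a minimal standalone sketch (not part of the patch) of the wraparound these checks guard against: PostgreSQL's AttrNumber is a 16-bit signed integer, so a column or resno count above 2^16 overflows before the old, late-placed limit checks could fire. The AttrNumber typedef and the 1600-column limit below mirror src/include/access/attnum.h and the heap limits of this era; the 70,000-column figure and the program itself are illustrative only.

/*
 * Sketch: a column count above 2^16 wraps when narrowed to the 16-bit
 * AttrNumber, which is why the patch rejects over-wide schemas and
 * target lists up front, before any attribute numbers are assigned.
 */
#include <stdio.h>
#include <stdint.h>

typedef int16_t AttrNumber;		/* as in src/include/access/attnum.h */
#define MaxHeapAttributeNumber 1600	/* heap column limit */

int
main(void)
{
	int			ncolumns = 70000;	/* hypothetical CREATE TABLE width */
	AttrNumber	attno = (AttrNumber) ncolumns;

	/*
	 * 70000 wraps to 4464 in 16 bits; previously such values were
	 * already in use by the time the range check was reached.
	 */
	printf("requested %d columns; AttrNumber sees %d (limit %d)\n",
		   ncolumns, (int) attno, MaxHeapAttributeNumber);
	return 0;
}

Compiled with any C99 compiler this prints "requested 70000 columns; AttrNumber sees 4464 (limit 1600)", which is why both MergeAttributes checks and the transformStmt check compare raw counts against the limits before deriving any AttrNumber from them.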