mirror of
https://github.com/postgres/postgres.git
synced 2025-07-28 23:42:10 +03:00
Sync up.
--- a/doc/src/sgml/ref/pg_upgrade.sgml
+++ b/doc/src/sgml/ref/pg_upgrade.sgml
@@ -1,5 +1,5 @@
 <!--
-$Header: /cvsroot/pgsql/doc/src/sgml/ref/Attic/pg_upgrade.sgml,v 1.17 2002/01/11 06:08:02 momjian Exp $
+$Header: /cvsroot/pgsql/doc/src/sgml/ref/Attic/pg_upgrade.sgml,v 1.18 2002/01/11 20:34:14 momjian Exp $
 PostgreSQL documentation
 -->
 
@@ -154,7 +154,8 @@ $ pg_upgrade -s schema.out data.old
 
 <note>
 <para>
-pg_upgrade does not migrate large objects.
+pg_upgrade does not handle custom tables/indexes/sequences in template1.
+It does handle other template1 object customizations.
 </para>
 </note>
 </procedure>
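The reworded note points at a concrete pre-flight check: before upgrading, a user can look for their own tables, indexes, and sequences in template1. A minimal sketch, not part of pg_upgrade itself; the catalog query and the 'pg_%' name filter are assumptions about 7.x-era catalogs:

    psql -d template1 -At -c \
        "SELECT relname, relkind FROM pg_class WHERE relkind IN ('r','i','S') AND relname NOT LIKE 'pg_%';"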
--- a/src/bin/pg_dump/pg_upgrade
+++ b/src/bin/pg_dump/pg_upgrade
@@ -5,14 +5,23 @@
 
 #set -x
 
-# $Header: /cvsroot/pgsql/src/bin/pg_dump/Attic/pg_upgrade,v 1.28 2002/01/11 06:48:41 momjian Exp $
+# Set this to "Y" to enable this program
+ENABLE="N"
+
+# UPGRADE_VERSION is the expected old database version
+UPGRADE_VERSION="7.1"
+
+# Hard-wired from pg_class in 7.1. I wish there was another way.
+# Are these fixed values for that release? XXX
+SRC_LARGEOBJECT_OID=16948
+SRC_LARGEOBJECT_IDX_OID=17148
+
+# $Header: /cvsroot/pgsql/src/bin/pg_dump/Attic/pg_upgrade,v 1.29 2002/01/11 20:34:14 momjian Exp $
 #
 # NOTE: we must be sure to update the version-checking code a few dozen lines
 # below for each new PostgreSQL release.
 
-TMPFILE="/tmp/pgupgrade.$$"
-
-trap "rm -f $TMPFILE" 0 1 2 3 15
+trap "rm -f /tmp/$$.*" 0 1 2 3 15
 
 SCHEMA=""
 while [ "$#" -gt 1 ]
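The two hard-wired OIDs only half answer the "I wish there was another way" comment. While the old 7.1 cluster is still running, the same values could in principle be read from its catalog the way the destination values are read later in this script. A sketch under that assumption (the old server must still be up, psql must reach it, and the 7.1 catalog is assumed to use the same relation names):

    SRC_LARGEOBJECT_OID=`psql -d template1 -At -c "SELECT oid FROM pg_class WHERE relname = 'pg_largeobject';"`
    SRC_LARGEOBJECT_IDX_OID=`psql -d template1 -At -c "SELECT oid FROM pg_class WHERE relname = 'pg_largeobject_loid_pn_index';"`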
@@ -71,15 +80,13 @@ then echo "Cannot read ./$OLDDIR/PG_VERSION --- something is wrong." 1>&2
 fi
 
 # Get the actual versions seen in the data dirs.
+
 DEST_VERSION=`cat ./data/PG_VERSION`
 SRC_VERSION=`cat ./$OLDDIR/PG_VERSION`
 
 # Check for version compatibility.
 # This code will need to be updated/reviewed for each new PostgreSQL release.
 
-# UPGRADE_VERSION is the expected output database version
-UPGRADE_VERSION="7.1"
-
 if [ "$DEST_VERSION" != "$UPGRADE_VERSION" -a "$DEST_VERSION" != "$SRC_VERSION" ]
 then echo "`basename $0` is for PostgreSQL version $UPGRADE_VERSION, but ./data/PG_VERSION contains $DEST_VERSION." 1>&2
 echo "Did you run initdb for version $UPGRADE_VERSION?" 1>&2
@@ -92,6 +99,8 @@ fi
 # enough to compare dotted version strings properly. Using a case statement
 # looks uglier but is more flexible.
 
+if [ "$ENABLE" != "Y" ]
+then
 case "$SRC_VERSION" in
 # 7.2) ;;
 *) echo "Sorry, `basename $0` cannot upgrade database version $SRC_VERSION to $DEST_VERSION." 1>&2
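The comment above the case explains the choice: string tests order version numbers lexically, so a hypothetical "7.10" would sort before "7.2", while a case statement simply enumerates the releases the script knows how to handle. A sketch of the shape such an entry takes once a source release is declared supported; the versions listed here are illustrative, not what the script currently accepts:

    case "$SRC_VERSION" in
        7.1)    ;;
        7.2)    ;;
        *)      echo "cannot upgrade database version $SRC_VERSION" 1>&2
                exit 1;;
    esac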
@@ -99,6 +108,9 @@ case "$SRC_VERSION" in
 echo "You will need to dump and restore using pg_dumpall." 1>&2
 exit 1;;
 esac
+fi
 
+# check for proper pg_resetxlog version
+
 pg_resetxlog 2>/dev/null
 # file not found status is normally 127, not 1
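The presence test added here leans on the shell convention spelled out in the comment: when a command cannot be found, the shell itself reports status 127, whereas pg_resetxlog run with no arguments exits non-zero (typically 1) after printing its usage. A sketch of the distinction:

    pg_resetxlog 2>/dev/null
    STATUS="$?"
    if [ "$STATUS" -eq 127 ]
    then echo "pg_resetxlog not found in PATH" 1>&2
         exit 1
    fi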
@@ -117,13 +129,27 @@ fi
 # If the XID is > 2 billion, 7.1 database will have non-frozen XID's in
 # low numbers, and 7.2 will think they are in the future --- bad.
 
-XID=`pg_resetxlog -n "$OLDDIR" | grep "NextXID" | awk -F' *' '{print $4}'`
+SRC_XID=`pg_resetxlog -n "$OLDDIR" | grep "NextXID" | awk -F' *' '{print $4}'`
 if [ "$SRC_VERSION" = "7.1" -a "$XID" -gt 2000000000 ]
 then echo "XID too high for $0.; exiting" 1>&2
 exit 1
 fi
+DST_XID=`pg_resetxlog -n data | grep "NextXID" | awk -F' *' '{print $4}'`
+
+# compare locales to make sure they match
+
+pg_resetxlog -n "$OLDDIR" | grep "^LC_" > /tmp/$$.0
+pg_resetxlog -n data | grep "^LC_" > /tmp/$$.1
+if ! diff /tmp/$$.0 /tmp/$$.1 >/dev/null
+then echo "Locales do not match between the two versions.; exiting" 1>&2
+exit 1
+fi
 
+
+###################################
 # Checking done. Ready to proceed.
+###################################
 
+
 # Execute the schema script to create everything
 
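The locale check added above compares the LC_ lines reported by pg_resetxlog -n for the two data directories. The same idea written as a small function, to make the intent explicit; a sketch that assumes only the pg_resetxlog -n output the script already relies on (the temporary file names are illustrative and are still covered by the /tmp/$$.* trap):

    locales_match () {
        pg_resetxlog -n "$1" | grep "^LC_" > /tmp/$$.srcloc
        pg_resetxlog -n "$2" | grep "^LC_" > /tmp/$$.dstloc
        diff /tmp/$$.srcloc /tmp/$$.dstloc > /dev/null
    }
    locales_match "$OLDDIR" data ||
    { echo "Locales do not match between the two versions.; exiting" 1>&2; exit 1; }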
@@ -149,12 +175,27 @@ fi
 
 # Used for scans looking for a database/table name match
 # New oid is looked up
-pg_dumpall -s > $TMPFILE 2>/dev/null
+pg_dumpall -s > /tmp/$$.3 2>/dev/null
 if [ "$?" -ne 0 ]
 then echo "Unable to dump schema of new database.; exiting" 1>&2
 exit 1
 fi
 
+# Get pg_largeobject oids for movement
+
+DST_LARGEOBJECT_OID=`psql -d template1 -At -c "SELECT oid from pg_class where relname = 'pg_largeobject';"`
+DST_LARGEOBJECT_IDX_OID=`psql -d template1 -At -c "SELECT oid from pg_class where relname = 'pg_largeobject_loid_pn_index';"`
+if [ "$LARGEOBJECT_OID" -eq 0 -o "$LARGEOBJECT_IDX_OID" -eq 0 ]
+then echo "Unable to find large object oid.; exiting" 1>&2
+exit 1
+fi
+
+if [ "$SRC_VERSION" = "$DST_VERSION" ]
+then # Versions are the same so we can get pg_largeobject oid this way
+SRC_LARGEOBJECT_IDX_OID="$DST_LARGEOBJECT_OID"
+SRC_LARGEOBJECT_IDX_OID="$DST_LARGEOBJECT_IDX_OID"
+fi
+
 # we are done with SQL database access
 # shutdown forces buffers to disk
 
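A caution about the block just added: the zero-OID sanity check tests $LARGEOBJECT_OID and $LARGEOBJECT_IDX_OID, the same-version branch compares against $DST_VERSION, and SRC_LARGEOBJECT_IDX_OID is assigned twice; none of those names is assigned anywhere in the hunks shown. A sketch of the presumably intended logic, using only names this script defines and an emptiness test instead of -eq on a possibly empty psql result:

    if [ -z "$DST_LARGEOBJECT_OID" -o -z "$DST_LARGEOBJECT_IDX_OID" ]
    then echo "Unable to find large object oid.; exiting" 1>&2
         exit 1
    fi

    if [ "$SRC_VERSION" = "$DEST_VERSION" ]
    then SRC_LARGEOBJECT_OID="$DST_LARGEOBJECT_OID"
         SRC_LARGEOBJECT_IDX_OID="$DST_LARGEOBJECT_IDX_OID"
    fi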
@@ -166,6 +207,8 @@ fi
 
 echo "Commit fixes complete, moving data files..."
 
+# Move table/index/sequence files
+
 cat "$SCHEMA" | while read LINE
 do
 if /bin/echo "$LINE" | grep -q '^\\connect [^ ]*$'
@@ -205,13 +248,17 @@ do
 newdb == "'"$DB"'" && \
 $3 == "'"$TABLE"'" \
 { ret=newoid; exit}
-END { print ret;}' $TMPFILE`
+END { print ret;}' /tmp/$$.3`
 if [ "$NEWOID" -eq 0 ]
 then echo "Move of database $DB, OID $OID, table $TABLE failed.
 New oid not found; exiting" 1>&2
 exit 1
 fi
+
 # We use stars so we don't have to worry about database oids
+
+# Test to make sure there is exactly one matching file on each place
+
 if [ `ls "$OLDDIR"/base/*/"$OID" | wc -l` -eq 0 ]
 then echo "Move of database $DB, OID $OID, table $TABLE failed.
 File not found; exiting" 1>&2
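The star in the path stands in for the per-database OID directory, so the ls | wc -l counts how many database directories contain a file named after the table's OID, and the surrounding checks demand exactly one. The same exactly-one test written against an arbitrary pattern, as a sketch of the idea rather than script code:

    COUNT=`ls "$OLDDIR"/base/*/"$OID" 2>/dev/null | wc -l`
    if [ "$COUNT" -ne 1 ]
    then echo "expected exactly one file for OID $OID, found $COUNT" 1>&2
         exit 1
    fi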
@@ -232,26 +279,101 @@ File not found; exiting" 1>&2
 Too many found; exiting" 1>&2
 exit 1
 fi
-mv -f "$OLDDIR"/base/*/"$OID" data/base/*/"$NEWOID"
+# Move files
+
+SRCDB=`basename \`dirname "$OLDDIR"/base/*/"$OID"\``
+DSTDB=`basename \`dirname data/base/*/"$NEWOID"\``
+mv -f "$OLDIR"/base/"$SRCDB"/"$OID" data/base/"$DSTDB"/"$NEWOID"
 if [ "$?" -ne 0 ]
 then echo "Move of database $DB, OID $OID, table $TABLE
 to $NEWOID failed.; exiting" 1>&2
 exit 1
 fi
+# handle table extents
+ls "$OLDDIR"/base/"$SRCDB"/"$OID".* | while read FILE
+do
+EXT=`basename "$FILE" | sed 's/[^[^\.]*\.\(.*\)$/\1/'`
+mv -f "$FILE" data/base/"$DSTDB"/"$NEWOID"."$EXT"
+if [ "$?" -ne 0 ]
+then echo "Move of database $DB, OID $OID, table $TABLE
+to $NEWOID failed.; exiting" 1>&2
+exit 1
+fi
+done
+
+# handle pg_largeobject
+# We use the unique oid's to tell use where to move the
+# pg_largeobject files.
+
+if [ -f "$OLDIR"/base/"$SRCDB"/"$SRC_LARGEOBJECT_OID" ]
+then mv "$OLDIR"/base/"$SRCDB"/"$SRC_LARGEOBJECT_OID" \
+data/base/"$DSTDB"/"$DST_LARGEOBJECT_OID"
+if [ "$?" -ne 0 ]
+then echo "Move of large object for database $DB
+to $NEWOID failed.; exiting" 1>&2
+exit 1
+fi
+# handle table extents
+ls "$OLDDIR"/base/"$SRCDB"/"$SRC_LARGEOBJECT_OID".* | while read FILE
+do
+EXT=`basename "$FILE" | sed 's/[^[^\.]*\.\(.*\)$/\1/'`
+mv -f "$FILE" data/base/"$DSTDB"/"$DST_LARGEOBJECT_OID"."$EXT"
+if [ "$?" -ne 0 ]
+then echo "Move of large object for database $DB
+to $NEWOID failed.; exiting" 1>&2
+exit 1
+fi
+done
+fi
+
+# Handle pg_largeobject_loid_pn_index
+if [ -f "$OLDIR"/base/"$SRCDB"/"$SRC_LARGEOBJECT_IDX_OID" ]
+then mv "$OLDIR"/base/"$SRCDB"/"$SRC_LARGEOBJECT_IDX_OID" \
+data/base/"$DSTDB"/"$DST_LARGEOBJECT_IDX_OID"
+if [ "$?" -ne 0 ]
+then echo "Move of large object for database $DB
+to $NEWOID failed.; exiting" 1>&2
+exit 1
+fi
+# handle table extents
+ls "$OLDDIR"/base/"$SRCDB"/"$SRC_LARGEOBJECT_IDX_OID".* | while read FILE
+do
+EXT=`basename "$FILE" | sed 's/[^[^\.]*\.\(.*\)$/\1/'`
+mv -f "$FILE" data/base/"$DSTDB"/"$DST_LARGEOBJECT_IDX_OID"."$EXT"
+if [ "$?" -ne 0 ]
+then echo "Move of large object for database $DB
+to $NEWOID failed.; exiting" 1>&2
+exit 1
+fi
+done
+fi
 TABLE=""
 fi
 done
 
 
-# Set this so the next VACUUM sets the old row XID's as "frozen"
+# Set this so future backends don't think these tuples are their own
+# because it matches their own XID.
 # Commit status already updated by vacuum above
+# Set to maximum XID just in case SRC wrapped around recently and
+# is lower than DST's database
+if [ "$SRC_XID" -gt "$DST_XID" ]
+then MAX_XID="$SRC_XID"
+else MAX_XID="$DST_XID"
+fi
 
-pg_resetxlog -x "$XID" data
+pg_resetxlog -x "$MAX_XID" data
 if [ "$?" -ne 0 ]
 then echo "Unable to set new XID.; exiting" 1>&2
 exit 1
 fi
 
+# Move over old WAL
+
+rm -r data/pg_xlog
+mv -f "$OLDDIR"/pg_xlog data
+
 # Set last checkpoint location from old database
 
 CHKPOINT=`pg_resetxlog -n "$OLDDIR" | grep "checkpoint location:" |
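The ".1", ".2", ... files handled by the extent loops above are relation segments: PostgreSQL splits a large table or index into 1 GB pieces named OID, OID.1, OID.2, and so on, so every segment must be renamed along with the base file. A compact sketch of the same rename using a glob and a simpler suffix extraction (illustrative only, not the script's code):

    for FILE in "$OLDDIR"/base/"$SRCDB"/"$OID".*
    do
        EXT=`echo "$FILE" | sed 's/.*\.//'`
        mv -f "$FILE" data/base/"$DSTDB"/"$NEWOID"."$EXT"
    done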
@@ -283,8 +405,7 @@ if [ "$SRC_VERSION" = "7.1" ]
 then echo "Set int8 sequence values from 7.1..."
 
 psql -d template1 -At -c "SELECT datname FROM pg_database" |
-grep -v '^template0$' | # no system databases
-grep -v '^template1$' | # no system databases
+grep -v '^template0$' | # template1 OK
 while read DB
 do
 echo "$DB"