
Remove tabs after spaces in C comments

This was not changed in HEAD, but will be done later as part of a
pgindent run.  Future pgindent runs will also do this.

Report by Tom Lane

Backpatch through all supported branches, but not HEAD
author Bruce Momjian
date 2014-05-06 11:26:25 -04:00
parent d65ecdf411
commit 1d033d3054
600 changed files with 2999 additions and 2999 deletions
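
As context for what the whitespace change looks like mechanically: the fix turns a space-then-tab sequence inside C comment text into two spaces (PostgreSQL's usual sentence spacing), so "pg_type. <tab>This might be" becomes "pg_type.  This might be". The short Python filter below is a hypothetical sketch of that normalization, not the actual pgindent tooling used for the later HEAD run; the comment-detection heuristic and the two-space replacement are assumptions made for illustration.

import re
import sys

# Hypothetical sketch of this commit's cleanup; the real change was made
# with pgindent-style tooling, not this script.  Assumes a space followed
# by one or more tabs in C comment text should become two spaces.
def strip_tabs_after_spaces(line: str) -> str:
    stripped = line.lstrip()
    # Crude heuristic: only touch lines that look like comment text.
    # A real tool would track /* ... */ state across lines.
    if stripped.startswith(("*", "/*", "//")):
        return re.sub(r" \t+", "  ", line)
    return line

for line in sys.stdin:
    sys.stdout.write(strip_tabs_after_spaces(line))

Run as a stdin/stdout filter over a .c file to preview the kind of replacement shown in the hunks below.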

@@ -85,7 +85,7 @@ intr2num(const Interval *i)
/*
* INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
- * in pg_type. This might be less than sizeof(Interval) if the compiler
+ * in pg_type.  This might be less than sizeof(Interval) if the compiler
* insists on adding alignment padding at the end of the struct.
*/
#define INTERVALSIZE 16

@@ -564,7 +564,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
rt_cube_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds.  An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -580,7 +580,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list.  Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/

@@ -2010,7 +2010,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
* Build sql statement to look up tuple of interest, ie, the one matching
* src_pkattvals. We used to use "SELECT *" here, but it's simpler to
* generate a result tuple that matches the table's physical structure,
- * with NULLs for any dropped columns. Otherwise we have to deal with two
+ * with NULLs for any dropped columns.  Otherwise we have to deal with two
* different tupdescs and everything's very confusing.
*/
appendStringInfoString(&buf, "SELECT ");
@@ -2236,7 +2236,7 @@ dblink_security_check(PGconn *conn, remoteConn *rconn)
}
/*
- * For non-superusers, insist that the connstr specify a password. This
+ * For non-superusers, insist that the connstr specify a password.  This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
* to be accessible to non-superusers.

@@ -91,7 +91,7 @@ geo_distance_internal(Point *pt1, Point *pt2)
* distance between the points in miles on earth's surface
*
* If float8 is passed-by-value, the oldstyle version-0 calling convention
- * is unportable, so we use version-1. However, if it's passed-by-reference,
+ * is unportable, so we use version-1.  However, if it's passed-by-reference,
* continue to use oldstyle. This is just because we'd like earthdistance
* to serve as a canary for any unintentional breakage of version-0 functions
* with float8 results.

@@ -12,7 +12,7 @@
* HEntry: there is one of these for each key _and_ value in an hstore
*
* the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry. the ISFIRST flag lets us tell
+ * by subtraction from the previous entry.  the ISFIRST flag lets us tell
* whether there is a previous entry.
*/
typedef struct

@@ -487,7 +487,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds.  An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -505,7 +505,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list.  Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/

@@ -612,7 +612,7 @@ ltreeparentsel(PG_FUNCTION_ARGS)
/*
* If the histogram is large enough, see what fraction of it the
* constant is "<@" to, and assume that's representative of the
- * non-MCV population. Otherwise use the default selectivity for the
+ * non-MCV population.  Otherwise use the default selectivity for the
* non-MCV population.
*/
selec = histogram_selectivity(&vardata, &contproc,

@@ -430,7 +430,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
}
/*
- * Dump all databases. There are no system objects to worry about.
+ * Dump all databases.  There are no system objects to worry about.
*/
void
sql_exec_dumpalldbs(PGconn *conn, struct options * opts)

@@ -102,7 +102,7 @@ CleanupPriorWALFiles(void)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that
+ * deciding whether a segment is still needed.  This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot

@@ -916,7 +916,7 @@ pgss_memsize(void)
* caller must hold an exclusive lock on pgss->lock
*
* Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist. This is because pgss_store releases and
+ * entry to already exist.  This is because pgss_store releases and
* reacquires lock after failing to find a match; so someone else could
* have made the entry while we waited to get exclusive lock.
*/

@@ -270,7 +270,7 @@ check_cluster_compatibility(migratorContext *ctx, bool live_check)
FILE *lib_test;
/*
- * Test pg_upgrade_support.so is in the proper place. We cannot copy it
+ * Test pg_upgrade_support.so is in the proper place.  We cannot copy it
* ourselves because install directories are typically root-owned.
*/
snprintf(libfile, sizeof(libfile), "%s/pg_upgrade_support%s", ctx->new.libpath,

@@ -27,7 +27,7 @@ static void putenv2(migratorContext *ctx, const char *var, const char *val);
* pg_control data. pg_resetxlog cannot be run while the server is running
* so we use pg_controldata; pg_controldata doesn't provide all the fields
* we need to actually perform the migration, but it provides enough for
- * check mode. We do not implement pg_resetxlog -n because it is hard to
+ * check mode.  We do not implement pg_resetxlog -n because it is hard to
* return valid xid data for a running server.
*/
void

@@ -33,7 +33,7 @@ generate_old_dump(migratorContext *ctx)
* split_old_dump
*
* This function splits pg_dumpall output into global values and
- * database creation, and per-db schemas. This allows us to create
+ * database creation, and per-db schemas.  This allows us to create
* the toast place holders between restoring these two parts of the
* dump. We split on the first "\connect " after a CREATE ROLE
* username match; this is where the per-db restore starts.

@@ -117,7 +117,7 @@ verify_directories(migratorContext *ctx)
*
* This function validates the given cluster directory - we search for a
* small set of subdirectories that we expect to find in a valid $PGDATA
- * directory. If any of the subdirectories are missing (or secured against
+ * directory.  If any of the subdirectories are missing (or secured against
* us) we display an error message and exit()
*
*/
@@ -153,7 +153,7 @@ check_data_dir(migratorContext *ctx, const char *pg_data)
* check_bin_dir()
*
* This function searches for the executables that we expect to find
- * in the binaries directory. If we find that a required executable
+ * in the binaries directory.  If we find that a required executable
* is missing (or secured against us), we display an error message and
* exit().
*/

@@ -391,7 +391,7 @@ win32_pghardlink(const char *src, const char *dst)
/*
* copy_dir()
*
- * Copies either a directory or a single file within a directory. If the
+ * Copies either a directory or a single file within a directory.  If the
* source argument names a directory, we recursively copy that directory,
* otherwise we copy a single file.
*/

@@ -28,11 +28,11 @@ static pageCnvCtx *loadConverterPlugin(migratorContext *ctx,
* the PageLayoutVersion of the new cluster. If the versions differ, this
* function loads a converter plugin and returns a pointer to a pageCnvCtx
* object (in *result) that knows how to convert pages from the old format
- * to the new format. If the versions are identical, this function just
+ * to the new format.  If the versions are identical, this function just
* returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
* is not required.
*
- * If successful this function sets *result and returns NULL. If an error
+ * If successful this function sets *result and returns NULL.  If an error
* occurs, this function returns an error message in the form of an null-terminated
* string.
*/
@@ -122,7 +122,7 @@ getPageVersion(migratorContext *ctx, uint16 *version, const char *pathName)
* This function loads a page-converter plugin library and grabs a
* pointer to each of the (interesting) functions provided by that
* plugin. The name of the plugin library is derived from the given
- * newPageVersion and oldPageVersion. If a plugin is found, this
+ * newPageVersion and oldPageVersion.  If a plugin is found, this
* function returns a pointer to a pageCnvCtx object (which will contain
* a collection of plugin function pointers). If the required plugin
* is not found, this function returns NULL.

@@ -77,7 +77,7 @@ transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
/*
* get_pg_database_relfilenode()
*
- * Retrieves the relfilenode for a few system-catalog tables. We need these
+ * Retrieves the relfilenode for a few system-catalog tables.  We need these
* relfilenodes later in the upgrade process.
*/
void

@@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -9,7 +9,7 @@
* entirely in crypt_blowfish.c.
*
* Put bcrypt generator also here as crypt-blowfish.c
- * may not be compiled always. -- marko
+ * may not be compiled always.  -- marko
*/
#include "postgres.h"

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -53,7 +53,7 @@
/*
* There is some confusion about whether and how to carry forward
- * the state of the pools. Seems like original Fortuna does not
+ * the state of the pools.  Seems like original Fortuna does not
* do it, resetting hash after each request. I guess expecting
* feeding to happen more often that requesting. This is absolutely
* unsuitable for pgcrypto, as nothing asynchronous happens here.
@@ -77,7 +77,7 @@
* How many pools.
*
* Original Fortuna uses 32 pools, that means 32'th pool is
- * used not earlier than in 13th year. This is a waste in
+ * used not earlier than in 13th year.  This is a waste in
* pgcrypto, as we have very low-frequancy seeding. Here
* is preferable to have all entropy usable in reasonable time.
*
@@ -296,7 +296,7 @@ reseed(FState *st)
}
/*
- * Pick a random pool. This uses key bytes as random source.
+ * Pick a random pool.  This uses key bytes as random source.
*/
static unsigned
get_rand_pool(FState *st)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -21,7 +21,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
@@ -211,7 +211,7 @@ static int s_vcmp(mp_int a, int v);
static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
- /* Unsigned magnitude subtraction. Assumes dc is big enough. */
+ /* Unsigned magnitude subtraction.  Assumes dc is big enough. */
static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
@@ -2275,7 +2275,7 @@ mp_error_string(mp_result res)
/* }}} */
/*------------------------------------------------------------------------*/
- /* Private functions for internal use. These make assumptions. */
+ /* Private functions for internal use.  These make assumptions. */
/* {{{ s_alloc(num) */

@@ -20,7 +20,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -90,7 +90,7 @@ pgp_cfb_free(PGP_CFB *ctx)
}
/*
- * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
+ * Data processing for normal CFB.  (PGP_PKT_SYMENCRYPTED_DATA_MDC)
*/
static int
mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -147,7 +147,7 @@ bn_to_mpi(mpz_t *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'.  Following
* algorihm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm.
*

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -82,7 +82,7 @@ bn_to_mpi(BIGNUM *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'.  Following
* algorihm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm.
*

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -93,7 +93,7 @@ add_block_entropy(PX_MD *md, text *data)
}
/*
- * Mix user data into RNG. It is for user own interests to have
+ * Mix user data into RNG.  It is for user own interests to have
* RNG state shuffled.
*/
static void
@@ -310,7 +310,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
}
/*
- * Find next word. Handle ',' and '=' as words. Skip whitespace.
+ * Find next word.  Handle ',' and '=' as words.  Skip whitespace.
* Put word info into res_p, res_len.
* Returns ptr to next word.
*/

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -7,12 +7,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
- /* programme of the US National Institute of Standards and Technology. */
+ /* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
- /* that the originators of the algorithm place on its exploitation. */
+ /* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
@@ -188,7 +188,7 @@ gen_tabs(void)
/* rijndael specification is in big endian format with */
/* bit 0 as the most significant bit. In the remainder */
/* of the specification the bits are numbered from the */
- /* least significant end of a byte. */
+ /* least significant end of a byte. */
for (i = 0; i < 256; ++i)
{

@@ -8,12 +8,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
- /* programme of the US National Institute of Standards and Technology. */
+ /* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
- /* that the originators of the algorithm place on its exploitation. */
+ /* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */

@@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)

@@ -296,7 +296,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
/*
* To avoid physically reading the table twice, try to do the
- * free-space scan in parallel with the heap scan. However,
+ * free-space scan in parallel with the heap scan.  However,
* heap_getnext may find no tuples on a given page, so we cannot
* simply examine the pages returned by the heap scan.
*/

@@ -381,7 +381,7 @@ gseg_picksplit(GistEntryVector *entryvec,
rt_seg_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds.  An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -397,7 +397,7 @@ gseg_picksplit(GistEntryVector *entryvec,
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list.  Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/

@@ -89,7 +89,7 @@ moddatetime(PG_FUNCTION_ARGS)
/*
* This is were we check to see if the field we are supposed to update
- * even exits. The above function must return -1 if name not found?
+ * even exits.  The above function must return -1 if name not found?
*/
if (attnum < 0)
ereport(ERROR,

@@ -47,17 +47,17 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
/*
* timetravel () --
- * 1. IF an update affects tuple with stop_date eq INFINITY
+ * 1.  IF an update affects tuple with stop_date eq INFINITY
* then form (and return) new tuple with start_date eq current date
* and stop_date eq INFINITY [ and update_user eq current user ]
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
* ELSE - skip updation of tuple.
- * 2. IF an delete affects tuple with stop_date eq INFINITY
+ * 2.  IF an delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]
* ELSE - skip deletion of tuple.
- * 3. On INSERT, if start_date is NULL then current date will be
+ * 3.  On INSERT, if start_date is NULL then current date will be
* inserted, if stop_date is NULL then INFINITY will be inserted.
* [ and insert_user eq current user, update_user and delete_user
* eq NULL ]

@@ -104,7 +104,7 @@ ssl_client_serial(PG_FUNCTION_ARGS)
* current database encoding if possible. Any invalid characters are
* replaced by question marks.
*
- * Parameter: str - OpenSSL ASN1_STRING structure. Memory managment
+ * Parameter: str - OpenSSL ASN1_STRING structure.  Memory managment
* of this structure is responsibility of caller.
*
* Returns Datum, which can be directly returned from a C language SQL

@@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk.  Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -388,7 +388,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target.  If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@@ -455,7 +455,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length.  Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@@ -550,7 +550,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id.  XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@@ -710,7 +710,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed.  Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);

@@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage.  This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@@ -281,7 +281,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target.  If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@@ -348,7 +348,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length.  Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/

@@ -166,7 +166,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it.  The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/

@@ -466,7 +466,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values.  It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@@ -533,7 +533,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val,
* Note that this is not responsible for determining whether the options
* are valid, but it does check that namespaces for all the options given are
* listed in validnsps. The NULL namespace is always valid and needs not be
- * explicitely listed. Passing a NULL pointer means that only the NULL
+ * explicitely listed.  Passing a NULL pointer means that only the NULL
* namespace is valid.
*
* Both oldOptions and the result are text arrays (or NULL for "default"),
@@ -806,7 +806,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array.  Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *

@@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns.  There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*

@@ -505,7 +505,7 @@ TupleDescInitEntry(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs.  Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc

@@ -180,7 +180,7 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow.  To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*

@@ -104,7 +104,7 @@ GinFormTuple(Relation index, GinState *ginstate,
* Gin tuple without any ItemPointers should be large enough to keep
* one ItemPointer, to prevent inconsistency between
* ginHeapTupleFastCollect and ginEntryInsert called by
- * ginHeapTupleInsert. ginHeapTupleFastCollect forms tuple without
+ * ginHeapTupleInsert.  ginHeapTupleFastCollect forms tuple without
* extra pointer to heap, but ginEntryInsert (called for pending list
* cleanup during vacuum) will form the same tuple with one
* ItemPointer.

@@ -670,7 +670,7 @@ processPendingPage(BuildAccumulator *accum, DatumArray *da,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either.  The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -723,7 +723,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize.  All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -817,7 +817,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it.  If so, process those entries immediately.  There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* gurantees that inserted row(s) will not continue on next page.

@@ -1293,10 +1293,10 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@@ -1368,10 +1368,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap.  After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap.  (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.

@@ -201,7 +201,7 @@ newScanKey(IndexScanDesc scan)
{
/*
* extractQueryFn signals that nothing can match, so we can just
- * set isVoidRes flag. No need to examine any more keys.
+ * set isVoidRes flag.  No need to examine any more keys.
*/
so->isVoidRes = true;
break;
@@ -210,9 +210,9 @@
if (entryValues == NULL || nEntryValues == 0)
{
/*
- * extractQueryFn signals that everything matches. This would
+ * extractQueryFn signals that everything matches.  This would
* require a full scan, which we can't do, but perhaps there is
- * another scankey that provides a restriction to use. So we keep
+ * another scankey that provides a restriction to use.  So we keep
* going and check only at the end.
*/
continue;

@@ -691,7 +691,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously.  During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we

@@ -191,7 +191,7 @@ gistbuildCallback(Relation index,
/*
* Since we already have the index relation locked, we call gistdoinsert
* directly. Normal access method calls dispatch through gistinsert,
- * which locks the relation for write. This is the right thing to do if
+ * which locks the relation for write.  This is the right thing to do if
* you're inserting single tups, but not when you're initializing the
* whole index at once.
*

@@ -72,7 +72,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns.  In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -161,7 +161,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len.  This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@@ -194,7 +194,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys.  We need consider only columns starting
* at attno.
*/
static void
@@ -292,7 +292,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side.  We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@@ -426,7 +426,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing.  Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2(&giststate->picksplitFn[attno],

@@ -448,7 +448,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty.  Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@@ -464,7 +464,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far.  The next iteration of this loop
* will compare the next column.
*/
}
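
The two hunks above describe gistchoose()'s column-by-column penalty comparison: a strictly better penalty in an earlier column selects a new target outright, while a tie defers the decision to the next column. A standalone C sketch of that selection rule, with an illustrative penalty table in place of the real GiST penalty callbacks (NCOLS, NTUPLES, and pen[][] are made-up stand-ins, not gistutil.c code):

#include <stdio.h>

#define NCOLS   3
#define NTUPLES 4

/* Illustrative stand-in for the per-column penalty of merging the new
 * key into tuple t's union key; lower is better. */
static const double pen[NTUPLES][NCOLS] = {
    {2.0, 1.0, 0.5},
    {1.0, 3.0, 0.0},
    {1.0, 1.0, 2.0},
    {1.0, 1.0, 1.0},
};

static int
choose_target(void)
{
    double best[NCOLS];
    int    which = -1;

    for (int t = 0; t < NTUPLES; t++)
    {
        for (int col = 0; col < NCOLS; col++)
        {
            double p = pen[t][col];

            if (which < 0 || p < best[col])
            {
                /* New best penalty for this column: tentatively select
                 * this tuple, and adopt its penalties for this column
                 * and all columns to its right. */
                which = t;
                for (int c = col; c < NCOLS; c++)
                    best[c] = pen[t][c];
                break;
            }
            if (p > best[col])
                break;          /* strictly worse here: reject this tuple */
            /* exactly as good for this column: compare the next one */
        }
    }
    return which;
}

int
main(void)
{
    printf("chose tuple %d\n", choose_target());    /* prints 3 */
    return 0;
}

Running it picks the last tuple, which ties on the first two columns and wins on the third, mirroring the tie-breaking behavior the comments describe.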
@ -631,7 +631,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))

View File

@ -62,7 +62,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->std.estimated_count = info->estimated_count;
/*
* XXX the above is wrong if index is partial. Would it be OK to just
* XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}

View File

@ -76,7 +76,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
* tuples by (expected) bucket number. However, such a sort is useless
* tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@ -233,7 +233,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
* for the TID we previously returned. (Because we hold share lock on
* for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
@ -510,7 +510,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
* hashm_spares[] values to compute bucket page addresses. This is a bit
* hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@ -641,7 +641,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
* double-scanned tuples in split buckets. Proceed by dead-reckoning.
* double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/

View File

@ -11,7 +11,7 @@
* $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.62 2010/01/07 04:53:34 tgl Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
* These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
* different in non-C locales. (See also hashbpchar, if so.)
* different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
* direction from the goal of parallelism. I did what I could. Rotates
* direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but is does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
* bits (affecting 32 bits is enough). The original hash function had
* bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
* Returns a uint32 value. Every bit of the key affects every bit of
* Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
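
The mixing these comments describe comes from Bob Jenkins's public-domain lookup3 design, on which hash_any() is based. A self-contained rendering of rotate-based mix and final steps in that style (the macro bodies follow lookup3; treat the exact rotation constants as illustrative rather than quoted from hashfunc.c):

#include <stdint.h>
#include <stdio.h>

/* 32-bit left rotate: kind to the top and bottom bits, and usually as
 * cheap as a shift. */
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

/* Reversibly mix three 32-bit values.  The read-after-write chains
 * double the number of affected bits per step, while the alternation
 * of -, ^ and + leaves some room for instruction-level parallelism. */
#define mix(a, b, c) \
do { \
    a -= c;  a ^= rot(c, 4);   c += b; \
    b -= a;  b ^= rot(a, 6);   a += c; \
    c -= b;  c ^= rot(b, 8);   b += a; \
    a -= c;  a ^= rot(c, 16);  c += b; \
    b -= a;  b ^= rot(a, 19);  a += c; \
    c -= b;  c ^= rot(b, 4);   b += a; \
} while (0)

/* Final mixing: need not be reversible, but every input bit must be
 * able to affect every bit of c. */
#define final(a, b, c) \
do { \
    c ^= b;  c -= rot(b, 14); \
    a ^= c;  a -= rot(c, 11); \
    b ^= a;  b -= rot(a, 25); \
    c ^= b;  c -= rot(b, 16); \
    a ^= c;  a -= rot(c, 4); \
    b ^= a;  b -= rot(a, 14); \
    c ^= b;  c -= rot(b, 24); \
} while (0)

int
main(void)
{
    uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 42;

    mix(a, b, c);
    final(a, b, c);
    printf("0x%08x\n", c);
    return 0;
}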

View File

@ -81,7 +81,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@ -90,12 +90,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
* no one else tries to compact the bucket meanwhile. This guarantees that
* no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
* immediate successor of the originally passed 'buf'. Additional overflow
* immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@ -158,7 +158,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
* Find an available overflow page and return it. The returned buffer
* Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@ -254,7 +254,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
* is the one we return to the caller. Both of these are correctly
* is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@ -285,7 +285,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@ -314,7 +314,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@ -495,7 +495,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
* Initialize a new bitmap page. The metapage has a write-lock upon
* Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
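
The free-page search sketched in the _hash_getovflpage() hunks above boils down to scanning bitmap words for the first clear bit (a set bit means the overflow page is in use, which is why a fresh bitmap is created all-ones). A standalone toy version of that inner search, with one in-memory word array standing in for the on-disk bitmap pages (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/*
 * Return the index of the first 0 bit in the bitmap, or -1 if every
 * bit is set, in which case the caller would add a whole new bitmap
 * page, as the hunks above describe.
 */
static long
first_free_bit(const uint32_t *map, long nwords)
{
    for (long w = 0; w < nwords; w++)
    {
        if (map[w] != UINT32_MAX)       /* some bit is clear here */
        {
            for (int b = 0; b < 32; b++)
                if ((map[w] & ((uint32_t) 1 << b)) == 0)
                    return w * 32 + b;
        }
    }
    return -1;
}

int
main(void)
{
    uint32_t map[2] = { UINT32_MAX, 0xFFFFFFF7 };   /* bit 35 is clear */

    printf("first free bit: %ld\n", first_free_bit(map, 2));
    return 0;
}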

View File

@ -52,7 +52,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
* take locks. We still take buffer-level locks, but not lmgr locks.
* take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@ -139,7 +139,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
* _hash_pageinit() is applied automatically. Otherwise it has
* _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@ -347,7 +347,7 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
* as the user-settable fillfactor parameter says. We can compute it
* as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
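
A back-of-the-envelope version of the fill-factor computation this hunk introduces, under stated assumptions (8 kB pages, a 75% fillfactor, and typical MAXALIGNed per-tuple overheads; all constants here are illustrative, not copied from _hash_metapinit):

#include <stdio.h>

int
main(void)
{
    const int    page_size   = 8192;    /* assumed block size */
    const int    page_header = 24;      /* header + special space, roughly */
    const int    tuple_hdr   = 8;       /* index tuple header, MAXALIGNed */
    const int    key_width   = 8;       /* uint32 hash key, MAXALIGNed */
    const int    line_ptr    = 4;       /* item pointer in the page */
    const double fillfactor  = 0.75;    /* user-settable fillfactor */

    int item_width = tuple_hdr + key_width + line_ptr;
    int usable     = (int) ((page_size - page_header) * fillfactor);
    int ffactor    = usable / item_width;

    printf("target tuples per bucket: %d\n", ffactor);  /* ~306 here */
    return 0;
}

Because the key is a fixed-width uint32, no per-row sampling is needed; the target is exact arithmetic, which is the point the comment makes.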
@ -380,7 +380,7 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
* calls to occur. This ensures that the smgr level has the right idea of
* calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
@ -516,9 +516,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Note: deadlock should be impossible here. Our own backend could only be
* holding bucket sharelocks due to stopped indexscans; those will not
* block other holders of the page-zero lock, who are only interested in
* acquiring bucket sharelocks themselves. Exclusive bucket locks are
* acquiring bucket sharelocks themselves. Exclusive bucket locks are
* only taken here and in hashbulkdelete, and neither of these operations
* needs any additional locks to complete. (If, due to some flaw in this
* needs any additional locks to complete. (If, due to some flaw in this
* reasoning, we manage to deadlock anyway, it's okay to error out; the
* index will be left in a consistent state.)
*/
@ -560,7 +560,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
* bucket. If we can't get the lock, give up.
* bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@ -618,7 +618,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
* Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@ -656,7 +656,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
* date before _hash_splitbucket finishes. That's okay, since all it
* date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
@ -897,7 +897,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
* the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/

View File

@ -251,7 +251,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
* false. Else, return true and set the hashso_curpos for the
* false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.

View File

@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
* bucket splits may occur during the index build. In that case we'd
* bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@ -52,7 +52,7 @@ _h_spoolinit(Relation index, uint32 num_buckets)
hspool->index = index;
/*
* Determine the bitmask for hash code values. Since there are currently
* Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
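
The computation the comment promises ("as follows") is cut off by the diff context. Its essence: round num_buckets up to a power of two and subtract one, so that hashkey & mask yields the expected bucket number used as the sort key. A standalone sketch (ceil_log2 is an illustrative stand-in for _hash_log2):

#include <stdint.h>
#include <stdio.h>

/* smallest k with 2^k >= num */
static int
ceil_log2(uint32_t num)
{
    int      k = 0;
    uint32_t limit = 1;

    while (limit < num)
    {
        limit <<= 1;
        k++;
    }
    return k;
}

int
main(void)
{
    uint32_t num_buckets = 1024;
    uint32_t hash_mask = (((uint32_t) 1) << ceil_log2(num_buckets)) - 1;

    printf("mask = 0x%08x\n", hash_mask);   /* 0x000003ff */
    return 0;
}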

View File

@ -156,7 +156,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@ -276,7 +276,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
* existing hash keys in the page. This is the appropriate place to start
* existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
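
The function documented here is a lower-bound binary search. A standalone equivalent over a plain array standing in for the page's sorted hash keys (names simplified; the real code works in OffsetNumbers starting at 1):

#include <stdint.h>
#include <stdio.h>

/*
 * Return the index of the first element with key >= hash_value, or
 * nkeys if hash_value exceeds every key: exactly the "place to start
 * a search, or to insert a new item" described above.
 */
static int
binsearch_ge(const uint32_t *keys, int nkeys, uint32_t hash_value)
{
    int lo = 0,
        hi = nkeys;             /* the answer lies in [lo, hi] */

    while (lo < hi)
    {
        int mid = lo + (hi - lo) / 2;

        if (keys[mid] < hash_value)
            lo = mid + 1;       /* answer is strictly right of mid */
        else
            hi = mid;           /* mid itself may be the answer */
    }
    return lo;
}

int
main(void)
{
    uint32_t keys[] = {10, 20, 20, 35};

    printf("%d\n", binsearch_ge(keys, 4, 20));  /* 1: first 20 */
    printf("%d\n", binsearch_ge(keys, 4, 99));  /* 4: past the end */
    return 0;
}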

View File

@ -107,7 +107,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
* might go into pages we already scanned. To guarantee consistent
* might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@ -115,7 +115,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
* strategy and enable synchronized scanning (see syncscan.c). Although
* strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
@ -244,7 +244,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
* visibility. Afterwards, however, the tuples we have found to be
* visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@ -1642,7 +1642,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
* show the latest version of the row. Note that it will not be changed
* show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@ -1760,7 +1760,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
* be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@ -1833,7 +1833,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
* TID where the tuple was stored. But note that any toasting of fields
* TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@ -1854,7 +1854,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If the object id of this tuple has already been assigned, trust the
* caller. There are a couple of ways this can happen. At initial db
* caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@ -2140,10 +2140,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
* other subxacts of this backend. It is legal for us to delete
* other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
* exclusive). We don't bother changing the on-disk hint bits
* exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@ -2207,7 +2207,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
* falls below the OldestXmin horizon. If the transaction finally aborts,
* falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@ -2308,7 +2308,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
* on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@ -2404,7 +2404,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
* different page. But we must compute the list before obtaining buffer
* different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@ -2492,10 +2492,10 @@ l2:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
* other subxacts of this backend. It is legal for us to update
* other subxacts of this backend. It is legal for us to update
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
* exclusive). We don't bother changing the on-disk hint bits
* exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@ -2585,7 +2585,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
* TOAST and/or table-file-extension work. We must mark the old tuple to
* TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@ -2650,7 +2650,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
* buffer locks on both old and new pages. To avoid deadlock against
* buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
@ -2698,7 +2698,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
* has enough space for the new tuple. If they are the same buffer, only
* has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
@ -2706,7 +2706,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
* to do a HOT update. Check if any of the index columns have been
* to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
@ -2724,13 +2724,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
* falls below the OldestXmin horizon. If the transaction finally aborts,
* falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
* not to optimize for aborts. Note that heap_xlog_update must be kept in
* not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
@ -2894,7 +2894,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
* there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* single heap_deform_tuple call on each tuple, instead? But that doesn't
* work for system columns ...
*/
@ -2917,7 +2917,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
* same logical value. But we should be OK as long as there are no false
* same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@ -2973,7 +2973,7 @@ HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
* on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@ -3055,7 +3055,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* waiter gets the tuple, potentially leading to indefinite starvation of
* some waiters. The possibility of share-locking makes the problem much
* worse --- a steady stream of share-lockers can easily block an exclusive
* locker forever. To provide more reliable semantics about who gets a
* locker forever. To provide more reliable semantics about who gets a
* tuple-level lock first, we use the standard lock manager. The protocol
* for waiting for a tuple-level lock is really
* LockTuple()
@ -3063,7 +3063,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* mark tuple as locked by me
* UnlockTuple()
* When there are multiple waiters, arbitration of who is to get the lock next
* is provided by LockTuple(). However, at most one tuple-level lock will
* is provided by LockTuple(). However, at most one tuple-level lock will
* be held or awaited per backend at any time, so we don't risk overflow
* of the lock table. Note that incoming share-lockers are required to
* do LockTuple as well, if there is any conflict, to ensure that they don't
@ -3205,7 +3205,7 @@ l3:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
* other subxacts of this backend. It is legal for us to lock the
* other subxacts of this backend. It is legal for us to lock the
* tuple in either case, however. We don't bother changing the
* on-disk hint bits since we are about to overwrite the xmax
* altogether.
@ -3363,7 +3363,7 @@ l3:
/*
* Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
* as running, but it finished before
* TransactionIdIsInProgress() got to run. Treat it like
* TransactionIdIsInProgress() got to run. Treat it like
* there's no locker in the tuple.
*/
}
@ -3399,8 +3399,8 @@ l3:
MarkBufferDirty(*buffer);
/*
* XLOG stuff. You might think that we don't need an XLOG record because
* there is no state change worth restoring after a crash. You would be
* XLOG stuff. You might think that we don't need an XLOG record because
* there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@ -3462,7 +3462,7 @@ l3:
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
* of this function in Postgres are extremely limited. Nonetheless we
* of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@ -3616,7 +3616,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
* exclusive lock. (We don't need this pushup for xmin, because only
* exclusive lock. (We don't need this pushup for xmin, because only
* VACUUM could be interested in changing an existing tuple's xmin, and
* there's only one VACUUM allowed on a table at a time.)
*/
@ -3761,7 +3761,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
* If we reached end of scan, rs_inited will now be false. We must
* If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@ -3945,7 +3945,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-freeze operation. Caller must already
* Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@ -3988,7 +3988,7 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-update operation. Caller must already
* Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@ -4067,7 +4067,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
* directly to smgr, rather than using bufmgr. Therefore there is no need
* directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*
@ -4549,7 +4549,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
/*
* In normal operation, it is important to lock the two pages in
* page-number order, to avoid possible deadlocks against other update
* operations going the other way. However, during WAL replay there can
* operations going the other way. However, during WAL replay there can
* be no other update happening, so we don't need to worry about that. But
* we *do* need to worry that we don't expose an inconsistent state to Hot
* Standby queries --- so the original page can't be unlocked before we've

View File

@ -117,7 +117,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
* enough there). In that case, the page will be pinned and locked only once.
* enough there). In that case, the page will be pinned and locked only once.
*
* We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
@ -134,7 +134,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
* insertions into the same relation. This keeps a pin on the current
* insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@ -187,7 +187,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
* on, as cached in the BulkInsertState or relcache entry. If that
* on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To insure this isn't an infinite
@ -219,7 +219,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
* give up and extend. This avoids one-tuple-per-page syndrome during
* give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@ -280,7 +280,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
* any) and prepare to look elsewhere. We don't care which order we
* any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@ -322,7 +322,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is
* only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@ -342,7 +342,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
* the relation some more. Note that we cannot release this lock before
* the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/

View File

@ -100,7 +100,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
* or pd_upper is probably atomic. Avoiding taking a lock seems more
* or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
@ -319,8 +319,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
* caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
* prstate showing the changes to be made. Items to be redirected are added
* caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
* prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
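
A compilable sketch of the bookkeeping this two-phase design implies, with field names modeled on pruneheap.c's PruneState but simplified (the array bound and helper are illustrative):

#include <stdint.h>

#define MAX_TUPLES_PER_PAGE 291         /* illustrative bound */

typedef uint16_t OffsetNumber;

/*
 * The scan phase only records intended changes here; a later phase
 * applies them to the page, and the same arrays drive log_heap_clean()
 * and its WAL replay, per the comments above.
 */
typedef struct PruneStateSketch
{
    int          nredirected;           /* pairs in redirected[] */
    int          ndead;
    int          nunused;

    OffsetNumber redirected[MAX_TUPLES_PER_PAGE * 2];   /* from,to pairs */
    OffsetNumber nowdead[MAX_TUPLES_PER_PAGE];          /* set LP_DEAD */
    OffsetNumber nowunused[MAX_TUPLES_PER_PAGE];        /* set LP_UNUSED */
} PruneStateSketch;

static void
record_redirect(PruneStateSketch *ps, OffsetNumber from, OffsetNumber to)
{
    ps->redirected[ps->nredirected * 2] = from;
    ps->redirected[ps->nredirected * 2 + 1] = to;
    ps->nredirected++;
}

int
main(void)
{
    PruneStateSketch ps = {0};

    record_redirect(&ps, 3, 7);     /* line pointer 3 will redirect to 7 */
    return ps.nredirected - 1;      /* 0: success */
}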
@ -362,7 +362,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
* any pruning occurs. So we have to be able to reap them
* any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@ -549,7 +549,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
* count it in the result. But changing a redirect (even to DEAD
* count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@ -638,7 +638,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
* to replay the WAL record when needed after a crash. Note that the
* to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void

View File

@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
* rebuilding indexes. The caller must hold AccessExclusiveLock on the
* rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
* or B first. (Note that a tuple in the middle of a chain is both A and B
* or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
* is encountered. That helps to keep the memory usage down. At the end,
* is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
* for deadness using OldestXmin is not exact. In such a case we might
* for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
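
The A/B bookkeeping described above can be sketched as two small tables: one holding tuples whose ctid target has not been seen yet, the other mapping already-written tuples' old TIDs to their new TIDs. A standalone toy version (linear scans stand in for the real hash tables, and printf stands in for actually writing a tuple):

#include <stdio.h>

typedef struct { int blk, off; } Tid;   /* stand-in for ItemPointerData */

typedef struct { Tid old_tid; Tid new_tid; } OldToNew;      /* B written */
typedef struct { Tid old_tid; Tid points_to; } Unresolved;  /* A waiting */

static OldToNew   old_new[16];
static int        n_old_new;
static Unresolved unres[16];
static int        n_unres;

static int
tideq(Tid a, Tid b)
{
    return a.blk == b.blk && a.off == b.off;
}

/*
 * Process one tuple: 'old' is its TID in the old heap, 'ctid' is what
 * its t_ctid pointed at there (its successor B, if any), and 'newtid'
 * is where it lands in the new heap.
 */
static void
process_tuple(Tid old, Tid ctid, Tid newtid)
{
    /* Are we the B of some earlier-seen A?  Resolve and "write" A. */
    for (int i = 0; i < n_unres; i++)
        if (tideq(unres[i].points_to, old))
        {
            printf("write A(%d,%d) with ctid -> (%d,%d)\n",
                   unres[i].old_tid.blk, unres[i].old_tid.off,
                   newtid.blk, newtid.off);
            unres[i--] = unres[--n_unres];  /* drop entry, recheck slot */
        }

    /* Is our own B already written?  Then we can be written at once. */
    int found = -1;
    for (int i = 0; i < n_old_new; i++)
        if (tideq(old_new[i].old_tid, ctid))
            found = i;

    if (found >= 0)
    {
        printf("write A(%d,%d) with ctid -> (%d,%d)\n",
               old.blk, old.off,
               old_new[found].new_tid.blk, old_new[found].new_tid.off);
        old_new[found] = old_new[--n_old_new];
    }
    else if (!tideq(old, ctid))             /* self-pointing: no A role */
    {
        unres[n_unres].old_tid = old;
        unres[n_unres].points_to = ctid;
        n_unres++;
    }

    /* Remember our own old->new mapping for any later-seen A. */
    old_new[n_old_new].old_tid = old;
    old_new[n_old_new].new_tid = newtid;
    n_old_new++;
}

int
main(void)
{
    /* Version chain (1,1) -> (1,2); we happen to see A before B. */
    process_tuple((Tid){1, 1}, (Tid){1, 2}, (Tid){9, 1});
    process_tuple((Tid){1, 2}, (Tid){1, 2}, (Tid){9, 2});
    return 0;
}

On the sample chain, seeing A first parks it as unresolved; seeing B then resolves and writes A with B's new TID, which is the behavior the comments describe.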
@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
* in the whole table. Note that if we do fail halfway through a CLUSTER,
* in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@ -339,7 +339,7 @@ rewrite_heap_tuple(RewriteState state,
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
* Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
* be given a pointer to a tuple in a disk buffer. It happens though that
* be given a pointer to a tuple in a disk buffer. It happens though that
* we can get the right things to happen by passing InvalidBuffer for the
* buffer.
*/
@ -543,7 +543,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
* Insert a tuple to the new relation. This has to track heap_insert
* Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the

View File

@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
* to keep them synchronized to reduce the overall I/O needed. The goal is
* to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
* tables in progress at any time. Therefore we just keep the scan positions
* tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
@ -245,7 +245,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
* so that number is passed in rather than computing it again. The result
* so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
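
The "small LRU list" of scan positions can be shown with a standalone toy: a handful of slots, most recently used first (ss_search_toy is an illustrative stand-in for the real ss_search, which also deals with locking and full relation identity):

#include <stdio.h>
#include <string.h>

#define SYNC_SCAN_NELEM 4        /* a few large scans at a time */

typedef struct { unsigned relid; unsigned blockno; } ScanLoc;

static ScanLoc lru[SYNC_SCAN_NELEM];   /* slot 0 = most recently used */
static int nused;

/*
 * Look up (set=0) or update (set=1) the scan position for a relation,
 * moving its entry to the LRU head; an unknown relation evicts the
 * least recently used slot and starts at block 0.
 */
static unsigned
ss_search_toy(unsigned relid, unsigned blockno, int set)
{
    int i;

    for (i = 0; i < nused; i++)
        if (lru[i].relid == relid)
            break;

    if (i == nused)                     /* not found: take last slot */
    {
        if (nused < SYNC_SCAN_NELEM)
            nused++;
        i = nused - 1;
        lru[i] = (ScanLoc){relid, 0};
    }
    if (set)
        lru[i].blockno = blockno;

    /* move the hit to the head of the list */
    ScanLoc hit = lru[i];
    memmove(&lru[1], &lru[0], i * sizeof(ScanLoc));
    lru[0] = hit;
    return lru[0].blockno;
}

int
main(void)
{
    ss_search_toy(42, 100, 1);                  /* scan reports block 100 */
    printf("%u\n", ss_search_toy(42, 0, 0));    /* new scan starts near 100 */
    return 0;
}

A newly arriving scan that finds an existing entry starts near the reported block instead of block 0, which is the synchronization effect described above.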

View File

@ -550,7 +550,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
* PLAIN storage). If necessary, we'll push it out as a new
* PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@ -693,7 +693,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
* inline. But skip this if there's no toast table to push them to.
* inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@ -803,7 +803,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
}
/*
* Finally we store attributes of type 'm' externally. At this point we
* Finally we store attributes of type 'm' externally. At this point we
* increase the target tuple size, so that 'm' attributes aren't stored
* externally unless really necessary.
*/
@ -1415,7 +1415,7 @@ toast_save_datum(Relation rel, Datum value,
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
* Create the index entry. We cheat a little here by not using
* Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*

View File

@ -44,7 +44,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
* routine does that. (We can't do it in the AM because index_endscan
* routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@ -185,7 +185,7 @@ BuildIndexValueDescription(Relation indexRelation,
* at rd_opcintype not the index tupdesc.
*
* Note: this is a bit shaky for opclasses that have pseudotype
* input types such as ANYARRAY or RECORD. Currently, the
* input types such as ANYARRAY or RECORD. Currently, the
* typoutput functions associated with the pseudotypes will work
* okay, but we might have to try harder in future.
*/
@ -410,7 +410,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
* Currently we do not support non-index-based scans here. (In principle
* Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of

View File

@ -126,7 +126,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
* obtained on the index. (Generally, NoLock should only be
* obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
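
A hedged usage sketch of the open/close pairing this comment describes (backend-only code; the header list is abridged and the log message is illustrative):

#include "postgres.h"
#include "access/genam.h"
#include "utils/rel.h"

static void
inspect_index(Oid index_oid)
{
    /* Take a lock appropriate to reading the index's definition. */
    Relation    irel = index_open(index_oid, AccessShareLock);

    elog(LOG, "index %s has %d columns",
         RelationGetRelationName(irel),
         RelationGetNumberOfAttributes(irel));

    /* Closing with the same mode releases the lock now; NoLock would
     * keep it until transaction end, per the comment above. */
    index_close(irel, AccessShareLock);
}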
@ -287,7 +287,7 @@ index_beginscan_internal(Relation indexRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
* cannot change). To restart the scan without changing keys, pass NULL
* cannot change). To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@ -375,7 +375,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
* that merge-join plans only work for MVCC snapshots. This could be fixed
* that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@ -400,7 +400,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
* snapshot, or NULL if no more matching tuples exist. On success,
* snapshot, or NULL if no more matching tuples exist. On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan).
*
@ -438,7 +438,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
* earlier member. Must still hold pin on current heap page.
* earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@ -559,7 +559,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* The xmin should match the previous xmax value, else chain is
* broken. (Note: this test is not optional because it protects
* broken. (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax aborted
* since we looked at it.)
*/
@ -761,7 +761,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
* type instead). Only the default functions are stored in relcache
* type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@ -795,7 +795,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
* support procs in the relcache. As above, only the "default"
* support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during

View File

@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
* That could overflow and give the wrong answer. Also, one must not
* That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
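
The safe pattern the comment prescribes, as a standalone example for a 32-bit key (the same shape as the btint4cmp-style support functions, though not copied from them):

#include <stdint.h>
#include <stdio.h>

/* Three-way compare that cannot overflow and never returns INT_MIN. */
static int
int32_cmp(int32_t a, int32_t b)
{
    if (a > b)
        return 1;
    else if (a == b)
        return 0;
    else
        return -1;
}

int
main(void)
{
    /* "a - b" would overflow here and could report the wrong order. */
    printf("%d\n", int32_cmp(INT32_MAX, -1));   /* 1 */
    printf("%d\n", int32_cmp(INT32_MIN, 1));    /* -1 */
    return 0;
}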

View File

@ -88,7 +88,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* and btinsert. By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
* will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
@ -127,7 +127,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
* move right in the tree. See Lehman and Yao for an excruciatingly
* move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
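
The move-right rule from Lehman and Yao that this hunk references reduces to: follow right-links while the page's high key says the search key now belongs further right. A standalone toy sketch (integer keys and a bare sibling pointer stand in for real pages; the rightmost page has no high key):

#include <stdio.h>

typedef struct Page
{
    int          highkey;       /* upper bound of keys on this page
                                 * (ignored on the rightmost page) */
    struct Page *right;         /* right sibling link */
} Page;

static Page *
moveright(Page *page, int key)
{
    /* A concurrent split may have moved our key range rightward after
     * we descended; chase right-links until the high key admits us. */
    while (page->right != NULL && key > page->highkey)
        page = page->right;
    return page;
}

int
main(void)
{
    Page p3 = {0, NULL};        /* rightmost */
    Page p2 = {80, &p3};
    Page p1 = {50, &p2};        /* p1 split into p1 -> p2 */

    printf("key 70 lands on page with high key %d\n",
           moveright(&p1, 70)->highkey);        /* 80 */
    return 0;
}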
@ -199,7 +199,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
* we must wait for to see if it commits a conflicting tuple. If an actual
* we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
@ -281,7 +281,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* If we are doing a recheck, we expect to find the tuple we
* are rechecking. It's not a duplicate, but we have to keep
* are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
@ -462,7 +462,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
* the next page. If it is equal to the high key, and there's not room
* the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@ -474,7 +474,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
* where the new tuple could be inserted. The caller should hold an
* where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@ -540,7 +540,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
* pages). Currently the probability of moving right is set at 0.99,
* pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
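
The 0.99 move-right probability is just a random() draw per step; a standalone sketch of that "get tired" decision (the cutoff constant and loop guard are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAX_RANDOM_VALUE 0x7FFFFFFF

int
main(void)
{
    int moves = 0;

    srandom(42);

    /* Keep moving right through pages of equal keys; stop with
     * probability ~0.01 per step, so long equal-key runs are crossed
     * quickly but not unboundedly. */
    while (random() > MAX_RANDOM_VALUE / 100)
    {
        moves++;
        if (moves >= 1000)      /* demo guard only */
            break;
    }
    printf("moved right %d times before splitting\n", moves);
    return 0;
}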
@ -641,7 +641,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
* insertion, and the buffer must be pinned and write-locked. On return,
* insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* The locking interactions in this code are critical. You should
@ -1120,7 +1120,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
* examine both pages. But if D, our right sibling, has a different
* examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@ -1342,7 +1342,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
* the left or right page. The bool is necessary to disambiguate the case
* the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@ -1578,7 +1578,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
* write locks once we have write lock on the parent page. (Any sooner,
* write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@ -1601,7 +1601,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
* descended the tree. If it was still the root then we perform a
* descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@ -1751,7 +1751,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
* is. Scan to the right first, then to the left.
* is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;

View File

@ -12,7 +12,7 @@
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.123.2.1 2010/08/29 19:33:21 tgl Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
* Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@ -56,7 +56,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
* Set pd_lower just past the end of the metadata. This is not essential
* Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
@ -74,7 +74,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
* and no root page exists, we just return InvalidBuffer. For
* and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@ -191,7 +191,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
* over again. (Is that really true? But it's hardly worth trying
* over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@ -256,7 +256,7 @@ _bt_getroot(Relation rel, int access)
CacheInvalidateRelcache(rel);
/*
* swap root write lock for read lock. There is no danger of anyone
* swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@ -324,7 +324,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
* from whatever non-root page we were at. If we ever do need to lock the
* from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@ -423,7 +423,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@ -491,7 +491,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
* blkno == P_NEW means to get an unallocated index page. The page
* blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@ -522,7 +522,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
* the page is still free. (For example, an already-free page could
* the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
@ -701,7 +701,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page.
*
* This must only be used for deleting leaf items. Deleting an item on a
* This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@ -769,7 +769,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
* is. When XLogInsert stores the whole buffer, the offsets array
* is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
@ -1008,7 +1008,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
BTPageOpaque opaque;
/*
* We can never delete rightmost pages nor root pages. While at it, check
* We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
@ -1080,7 +1080,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
* facilities not available in recovery mode). Instead, just set
* facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
@ -1115,7 +1115,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
* if so, give up. (Right now, that should never happen, since page
* if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
@ -1144,7 +1144,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
lbuf = InvalidBuffer;
/*
* Next write-lock the target page itself. It should be okay to take just
* Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/

View File

@ -152,7 +152,7 @@ btbuild(PG_FUNCTION_ARGS)
/*
* If we are reindexing a pre-existing index, it is critical to send out a
* relcache invalidation SI message to ensure all backends re-read the
* index metapage. We expect that the caller will ensure that happens
* index metapage. We expect that the caller will ensure that happens
* (typically as a side effect of updating index stats, but it must happen
* even if the stats don't change!)
*/
@ -260,11 +260,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
* Yes, remember it for later. (We'll deal with all such tuples
* Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
* multiple times. It's not worth trying to optimize that, so we
* multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
@ -828,7 +828,7 @@ restart:
vstate->lastBlockLocked = blkno;
/*
* Check whether we need to recurse back to earlier pages. What we
* Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.

Some files were not shown because too many files have changed in this diff