From 8eaef91fff849885a7369a21a752e87cb1e592c8 Mon Sep 17 00:00:00 2001
From: unknown
Date: Sat, 18 Dec 2004 05:19:21 +0200
Subject: [PATCH 01/21] Add 0x before pointers (to help with debugging)

Add support for VARCHAR with 1 or 2 length bytes
Enable VARCHAR packing in MyISAM files (the previous patch didn't pack data
properly)
Give an error if we get problems in internal temporary tables during a SELECT
Don't use the new table generated by ALTER TABLE if index generation fails
Fixed a wrong call in range_end() (could cause an ASSERT in debug mode)

BUILD/SETUP.sh:
  Add flags for Intel 64
dbug/dbug.c:
  Add 0x before pointers (to help with debugging)
heap/_check.c:
  Add 0x before pointers (to help with debugging)
heap/hp_create.c:
  Add support for VARCHAR with 1 or 2 length bytes
heap/hp_delete.c:
  Add 0x before pointers
heap/hp_hash.c:
  Add support for VARCHAR with 1 or 2 length bytes
  Added more debugging
heap/hp_open.c:
  Add 0x before pointers
heap/hp_rkey.c:
  Add 0x before pointers
heap/hp_rrnd.c:
  Add 0x before pointers
heap/hp_write.c:
  Add 0x before pointers
include/my_base.h:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/ft_static.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/ft_test1.c:
  Add support for VARCHAR with 1 or 2 length bytes
  Fixed indentation (this file should probably be deleted as it doesn't
  compile)
myisam/ft_update.c:
  Add support for VARCHAR with 1 or 2 length bytes
  Fixed indentation
  Removed some unneeded 'else' branches
myisam/mi_check.c:
  Don't give an error for tables packed with myisampack
myisam/mi_checksum.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_create.c:
  Add support for VARCHAR with 1 or 2 length bytes
  Store the number of pack-length bytes in keyseg->bit_start
myisam/mi_dbug.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_dynrec.c:
  Add support for VARCHAR with 1 or 2 length bytes
  (the old code in _mi_rec_unpack() didn't really work with VARCHARs)
myisam/mi_key.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_open.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_packrec.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_search.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_test1.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_test3.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/mi_test_all.res:
  Update results
myisam/mi_unique.c:
  Add support for VARCHAR with 1 or 2 length bytes
myisam/myisampack.c:
  Add support for VARCHAR with 1 or 2 length bytes
mysql-test/include/varchar.inc:
  Added more tests
mysql-test/r/bdb.result:
  Update results after new tests
mysql-test/r/information_schema.result:
  Update results
mysql-test/r/innodb.result:
  Update results
mysql-test/r/myisam.result:
  Update results after new tests
mysql-test/r/ps_1general.result:
  Update results
mysql-test/t/bdb.test:
  Shorter comments
mysys/list.c:
  Add 0x before pointers
mysys/my_handler.c:
  Add support for VARCHAR with 1 or 2 length bytes
mysys/raid.cc:
  Add 0x before pointers
sql/field.cc:
  Add support for VARCHAR with 1 or 2 length bytes
sql/field.h:
  Add support for VARCHAR with 1 or 2 length bytes
sql/field_conv.cc:
  Add support for VARCHAR with 1 or 2 length bytes
sql/ha_berkeley.cc:
  Add support for VARCHAR with 1 or 2 length bytes
sql/ha_heap.cc:
  Add support for VARCHAR with 1 or 2 length bytes
sql/ha_myisam.cc:
  Ensure that enable_indexes() will report an error if it fails
  Enable VARCHAR packing for MyISAM files
sql/item_sum.cc:
  Change key_cmp -> cmp() as we are comparing fields, not key segments
sql/opt_range.cc:
  Add support for VARCHAR with 1 or 2 length bytes
  Change range_end to call ha_index_or_rnd_end() as in some error cases we
  may be in rnd mode when we abort
sql/sql_base.cc:
  Remove a compiler warning
sql/sql_parse.cc:
  Move length checking code to sql_table.cc (as we don't have the character
  set for fields at this stage)
sql/sql_select.cc:
  Add support for VARCHAR with 1 or 2 length bytes
  Ensure that we report an error if we get an error while writing to
  internal temporary tables
sql/sql_select.h:
  Add support for VARCHAR with 1 or 2 length bytes
sql/sql_show.cc:
  Fix a typo in a comment
sql/sql_table.cc:
  Don't use the new table generated by ALTER TABLE if index generation fails
vio/vio.c:
  Fixed DBUG info
vio/viosocket.c:
  Fixed DBUG info
vio/viossl.c:
  Fixed DBUG info
vio/viosslfactories.c:
  Fixed DBUG info
---
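Review note, placed below the cut line so it stays out of the commit message:
the convention this patch applies everywhere is that a VARCHAR value is stored
as its data prefixed by a byte length, packed in 1 byte when the field can
hold fewer than 256 bytes and in 2 little-endian bytes otherwise; key segments
remember the chosen width in keyseg->bit_start. The following minimal,
standalone C sketch shows that read/write logic. It assumes only the
HA_VARCHAR_PACKLENGTH macro that this patch adds to include/my_base.h;
store2()/fetch2() are illustrative stand-ins for MySQL's int2store() and
uint2korr() macros, and varchar_store()/varchar_length() are hypothetical
helper names, not functions from the tree.

#include <stdio.h>
#include <string.h>

/* As added to include/my_base.h by this patch */
#define HA_VARCHAR_PACKLENGTH(field_length) ((field_length) < 256 ? 1 : 2)

/* Illustrative stand-ins for MySQL's int2store()/uint2korr() */
static void store2(unsigned char *pos, unsigned int val)
{
  pos[0]= (unsigned char) (val & 0xFF);
  pos[1]= (unsigned char) (val >> 8);
}

static unsigned int fetch2(const unsigned char *pos)
{
  return (unsigned int) pos[0] | ((unsigned int) pos[1] << 8);
}

/*
  Store 'len' bytes of 'src' as a VARCHAR value at 'to'.  'field_length'
  is the maximum byte length of the column and decides whether the length
  prefix takes 1 or 2 bytes.  Returns the first byte after the value.
*/
static unsigned char *varchar_store(unsigned char *to,
                                    unsigned int field_length,
                                    const unsigned char *src,
                                    unsigned int len)
{
  unsigned int pack_length= HA_VARCHAR_PACKLENGTH(field_length);
  if (pack_length == 1)
    *to= (unsigned char) len;
  else
    store2(to, len);
  memcpy(to + pack_length, src, len);
  return to + pack_length + len;
}

/*
  Read back the stored length; the same 1-vs-2 byte test the patch repeats
  in hp_hash.c, mi_key.c, mi_unique.c and others, where pack_length comes
  from keyseg->bit_start for key segments.
*/
static unsigned int varchar_length(const unsigned char *pos,
                                   unsigned int pack_length)
{
  return pack_length == 1 ? (unsigned int) *pos : fetch2(pos);
}

int main(void)
{
  unsigned char buff[302];
  varchar_store(buff, 300, (const unsigned char *) "abc", 3);
  printf("pack_length: %d  length: %u\n", HA_VARCHAR_PACKLENGTH(300),
         varchar_length(buff, HA_VARCHAR_PACKLENGTH(300)));
  return 0;
}

For a VARCHAR(300) column this prints "pack_length: 2  length: 3"; at 255
bytes or less the prefix shrinks to one byte, which is why the single
HA_KEYTYPE_VARTEXT/VARBINARY key types are split into VARTEXT1/VARTEXT2 and
VARBINARY1/VARBINARY2 below.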
 BUILD/SETUP.sh | 1 +
 BUILD/compile-pentium64-valgrind-max | 29 +++
 dbug/dbug.c | 2 +-
 heap/_check.c | 4 +-
 heap/hp_create.c | 23 +-
 heap/hp_delete.c | 4 +-
 heap/hp_hash.c | 101 +++++---
 heap/hp_open.c | 4 +-
 heap/hp_rkey.c | 2 +-
 heap/hp_rrnd.c | 8 +-
 heap/hp_write.c | 2 +-
 include/my_base.h | 10 +-
 myisam/ft_static.c | 4 +-
 myisam/ft_test1.c | 85 ++++---
 myisam/ft_update.c | 33 +--
 myisam/mi_check.c | 3 +-
 myisam/mi_checksum.c | 8 +-
 myisam/mi_create.c | 82 ++++--
 myisam/mi_dbug.c | 6 +-
 myisam/mi_dynrec.c | 75 ++++--
 myisam/mi_key.c | 31 ++-
 myisam/mi_open.c | 8 +-
 myisam/mi_packrec.c | 29 ++-
 myisam/mi_search.c | 6 +-
 myisam/mi_test1.c | 63 +++--
 myisam/mi_test3.c | 1 +
 myisam/mi_test_all.res | 73 +++---
 myisam/mi_unique.c | 33 ++-
 myisam/myisampack.c | 16 +-
 mysql-test/include/varchar.inc | 60 +++++
 mysql-test/r/bdb.result | 211 +++++++++++++++
 mysql-test/r/information_schema.result | 4 +-
 mysql-test/r/innodb.result | 6 +-
 mysql-test/r/myisam.result | 217 +++++++++++++++-
 mysql-test/r/ps_1general.result | 2 +-
 mysql-test/t/bdb.test | 5 +-
 mysys/list.c | 2 +-
 mysys/my_handler.c | 6 +-
 mysys/raid.cc | 14 +-
 sql/field.cc | 340 +++++++++++++++++--------
 sql/field.h | 43 ++--
 sql/field_conv.cc | 39 ++-
 sql/ha_berkeley.cc | 27 +-
 sql/ha_heap.cc | 6 +-
 sql/ha_myisam.cc | 10 +-
 sql/item_sum.cc | 6 +-
 sql/opt_range.cc | 10 +-
 sql/sql_base.cc | 2 +
 sql/sql_parse.cc | 8 +-
 sql/sql_select.cc | 63 ++---
 sql/sql_select.h | 7 +-
 sql/sql_show.cc | 2 +-
 sql/sql_table.cc | 5 +-
 vio/vio.c | 5 +-
 vio/viosocket.c | 14 +-
 vio/viossl.c | 21 +-
 vio/viosslfactories.c | 8 +-
 57 files changed, 1417 insertions(+), 472 deletions(-)
 create mode 100644 BUILD/compile-pentium64-valgrind-max

diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 8fa70aecb6a..d378276a0a3 100644
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -45,6 +45,7 @@ cxx_warnings="$global_warnings -Woverloaded-virtual -Wsign-promo -Wreorder -Wcto
 alpha_cflags="-mcpu=ev6 -Wa,-mev6" # Not used yet
 pentium_cflags="-mcpu=pentiumpro"
+pentium64_cflags="-mcpu=nocona -m64"
 ppc_cflags="-mpowerpc -mcpu=powerpc"
 sparc_cflags=""
diff --git a/BUILD/compile-pentium64-valgrind-max b/BUILD/compile-pentium64-valgrind-max
new file mode 100644
index 00000000000..7f78089c3e8
--- /dev/null
+++ b/BUILD/compile-pentium64-valgrind-max
@@ -0,0 +1,29 @@
+#! /bin/sh
+
+path=`dirname $0`
+. "$path/SETUP.sh"
"$path/SETUP.sh" + +extra_flags="$pentium64_cflags $debug_cflags -USAFEMALLOC -UFORCE_INIT_OF_VARS -DHAVE_purify -DMYSQL_SERVER_SUFFIX=-valgrind-max" +c_warnings="$c_warnings $debug_extra_warnings" +cxx_warnings="$cxx_warnings $debug_extra_warnings" +extra_configs="$pentium_configs $debug_configs" + +# We want to test isam when building with valgrind +extra_configs="$extra_configs --with-berkeley-db --with-innodb --with-isam --with-embedded-server --with-openssl --with-vio --with-raid --with-ndbcluster" + +. "$path/FINISH.sh" + +if test -z "$just_print" +then + set +v +x + echo "\ +****************************************************************************** +Note that by default BUILD/compile-pentium-valgrind-max calls 'configure' with +--enable-assembler. When Valgrind detects an error involving an assembly +function (for example an uninitialized value used as an argument of an +assembly function), Valgrind will not print the stacktrace and 'valgrind +--gdb-attach=yes' will not work either. If you need a stacktrace in those +cases, you have to run BUILD/compile-pentium-valgrind-max with the +--disable-assembler argument. +******************************************************************************" +fi diff --git a/dbug/dbug.c b/dbug/dbug.c index d21b4e7801a..91b7e7b6c4c 100644 --- a/dbug/dbug.c +++ b/dbug/dbug.c @@ -978,7 +978,7 @@ uint length) { fprintf(_db_fp_, "%s: ", state->func); } - sprintf(dbuff,"%s: Memory: %lx Bytes: (%d)\n", + sprintf(dbuff,"%s: Memory: 0x%lx Bytes: (%d)\n", keyword,(ulong) memory, length); (void) fputs(dbuff,_db_fp_); diff --git a/heap/_check.c b/heap/_check.c index a745aee48bf..4316a9926f7 100644 --- a/heap/_check.c +++ b/heap/_check.c @@ -123,7 +123,7 @@ static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records, blength, records)) != i) { - DBUG_PRINT("error",("Record in wrong link: Link %d Record: %lx Record-link %d", i,hash_info->ptr_to_rec,rec_link)); + DBUG_PRINT("error",("Record in wrong link: Link %d Record: 0x%lx Record-link %d", i,hash_info->ptr_to_rec,rec_link)); error=1; } else @@ -180,7 +180,7 @@ static int check_one_rb_key(HP_INFO *info, uint keynr, ulong records, key_length, SEARCH_FIND | SEARCH_SAME, ¬_used)) { error= 1; - DBUG_PRINT("error",("Record in wrong link: key: %d Record: %lx\n", + DBUG_PRINT("error",("Record in wrong link: key: %d Record: 0x%lx\n", keynr, recpos)); } else diff --git a/heap/hp_create.c b/heap/hp_create.c index d296c9db28b..0580c178498 100644 --- a/heap/hp_create.c +++ b/heap/hp_create.c @@ -77,14 +77,31 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef, case HA_KEYTYPE_INT8: keyinfo->seg[j].flag|= HA_SWAP_KEY; break; - case HA_KEYTYPE_VARBINARY: + case HA_KEYTYPE_VARBINARY1: /* Case-insensitiveness is handled in coll->hash_sort */ - keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT; + keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; /* fall_through */ - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: if (!my_binary_compare(keyinfo->seg[j].charset)) keyinfo->flag|= HA_END_SPACE_KEY; keyinfo->flag|= HA_VAR_LENGTH_KEY; + /* Save number of bytes used to store length */ + keyinfo->seg[j].bit_start= 1; + break; + case HA_KEYTYPE_VARBINARY2: + /* Case-insensitiveness is handled in coll->hash_sort */ + /* fall_through */ + case HA_KEYTYPE_VARTEXT2: + if (!my_binary_compare(keyinfo->seg[j].charset)) + keyinfo->flag|= HA_END_SPACE_KEY; + keyinfo->flag|= HA_VAR_LENGTH_KEY; + /* Save number of bytes used to store length */ + keyinfo->seg[j].bit_start= 2; + /* + Make future comparison simpler by only having to 
check for + one type + */ + keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; break; default: break; diff --git a/heap/hp_delete.c b/heap/hp_delete.c index 4adefde1fe9..5287533ae0a 100644 --- a/heap/hp_delete.c +++ b/heap/hp_delete.c @@ -24,7 +24,7 @@ int heap_delete(HP_INFO *info, const byte *record) HP_SHARE *share=info->s; HP_KEYDEF *keydef, *end, *p_lastinx; DBUG_ENTER("heap_delete"); - DBUG_PRINT("enter",("info: %lx record: %lx",info,record)); + DBUG_PRINT("enter",("info: %lx record: 0x%lx",info,record)); test_active(info); @@ -139,7 +139,7 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, /* Save for heap_rnext/heap_rprev */ info->current_hash_ptr=last_ptr; info->current_ptr = last_ptr ? last_ptr->ptr_to_rec : 0; - DBUG_PRINT("info",("Corrected current_ptr to point at: %lx", + DBUG_PRINT("info",("Corrected current_ptr to point at: 0x%lx", info->current_ptr)); } empty=pos; diff --git a/heap/hp_hash.c b/heap/hp_hash.c index 7e5f92bc7b8..3121ef71fb0 100644 --- a/heap/hp_hash.c +++ b/heap/hp_hash.c @@ -271,18 +271,21 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) } cs->coll->hash_sort(cs, pos, length, &nr, &nr2); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { CHARSET_INFO *cs= seg->charset; + uint pack_length= 2; /* Key packing is constant */ uint length= uint2korr(pos); if (cs->mbmaxlen > 1) { uint char_length; - char_length= my_charpos(cs, pos +2, pos +2 + length, + char_length= my_charpos(cs, pos +pack_length, + pos +pack_length + length, seg->length/cs->mbmaxlen); set_if_smaller(length, char_length); } - cs->coll->hash_sort(cs, pos+2, length, &nr, &nr2); + cs->coll->hash_sort(cs, pos+pack_length, length, &nr, &nr2); + key+= pack_length; } else { @@ -293,6 +296,7 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) } } } + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); return((ulong) nr); } @@ -300,7 +304,6 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) { - /*register*/ ulong nr=1, nr2=4; HA_KEYSEG *seg,*endseg; @@ -327,18 +330,20 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) } cs->coll->hash_sort(cs, pos, char_length, &nr, &nr2); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { CHARSET_INFO *cs= seg->charset; - uint length= uint2korr(pos); + uint pack_length= seg->bit_start; + uint length= (pack_length == 1 ? 
(uint) *(uchar*) pos : uint2korr(pos)); if (cs->mbmaxlen > 1) { uint char_length; - char_length= my_charpos(cs, pos + 2 , pos + 2 + length, + char_length= my_charpos(cs, pos + pack_length, + pos + pack_length + length, seg->length/cs->mbmaxlen); set_if_smaller(length, char_length); } - cs->coll->hash_sort(cs, pos+2, length, &nr, &nr2); + cs->coll->hash_sort(cs, pos+pack_length, length, &nr, &nr2); } else { @@ -349,7 +354,8 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) } } } - return((ulong) nr); + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); + return(nr); } #else @@ -392,10 +398,13 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) { seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { + uint pack_length= 2; /* Key packing is constant */ uint length= uint2korr(pos); - seg->charset->hash_sort(seg->charset, pos+2, length, &nr, NULL); + seg->charset->hash_sort(seg->charset, pos+pack_length, length, &nr, + NULL); + key+= pack_length; } else { @@ -406,7 +415,8 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key) } } } - return((ulong) nr); + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); + return(nr); } /* Calc hashvalue for a key in a record */ @@ -418,7 +428,7 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) for (seg=keydef->seg,endseg=seg+keydef->keysegs ; seg < endseg ; seg++) { - uchar *pos=(uchar*) rec+seg->start,*end=pos+seg->length; + uchar *pos=(uchar*) rec+seg->start; if (seg->null_bit) { if (rec[seg->null_pos] & seg->null_bit) @@ -431,13 +441,16 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) { seg->charset->hash_sort(seg->charset,pos,((uchar*)key)-pos,&nr,NULL); } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { - uint length= uint2korr(pos); - seg->charset->hash_sort(seg->charset, pos+2, length, &nr, NULL); + uint pack_length= seg->bit_start; + uint length= (pack_length == 1 ? 
(uint) *(uchar*) pos : uint2korr(pos)); + seg->charset->hash_sort(seg->charset, pos+pack_length, + length, &nr, NULL); } else { + uchar *end= pos+seg->length; for ( ; pos < end ; pos++) { nr *=16777619; @@ -445,7 +458,8 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const byte *rec) } } } - return((ulong) nr); + DBUG_PRINT("exit", ("hash: 0x%lx", nr)); + return(nr); } #endif @@ -510,13 +524,25 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2, pos2,char_length2, 0)) return 1; } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { - uchar *pos1= (uchar*)rec1 + seg->start; - uchar *pos2= (uchar*)rec2 + seg->start; - uint char_length1= uint2korr(pos1); - uint char_length2= uint2korr(pos2); + uchar *pos1= (uchar*) rec1 + seg->start; + uchar *pos2= (uchar*) rec2 + seg->start; + uint char_length1, char_length2; + uint pack_length= seg->bit_start; CHARSET_INFO *cs= seg->charset; + if (pack_length == 1) + { + char_length1= (uint) *(uchar*) pos1++; + char_length2= (uint) *(uchar*) pos2++; + } + else + { + char_length1= uint2korr(pos1); + char_length2= uint2korr(pos2); + pos1+= 2; + pos2+= 2; + } if (cs->mbmaxlen > 1) { uint char_length= seg->length / cs->mbmaxlen; @@ -527,8 +553,8 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const byte *rec1, const byte *rec2, } if (cs->coll->strnncollsp(seg->charset, - pos1+2, char_length1, - pos2+2, char_length2, + pos1, char_length1, + pos2, char_length2, seg->flag & HA_END_SPACE_ARE_EQUAL ? 0 : diff_if_only_endspace_difference)) return 1; @@ -585,28 +611,31 @@ int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key) (uchar*) key, char_length_key, 0)) return 1; } - else if (seg->type == HA_KEYTYPE_VARTEXT) + else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { uchar *pos= (uchar*) rec + seg->start; CHARSET_INFO *cs= seg->charset; - uint char_length_rec= uint2korr(pos); + uint pack_length= seg->bit_start; + uint char_length_rec= (pack_length == 1 ? (uint) *(uchar*) pos : + uint2korr(pos)); + /* Key segments are always packed with 2 bytes */ uint char_length_key= uint2korr(key); - + pos+= pack_length; + key+= 2; /* skip key pack length */ if (cs->mbmaxlen > 1) { uint char_length= seg->length / cs->mbmaxlen; - char_length_key= my_charpos(cs, key+2, key +2 + char_length_key, + char_length_key= my_charpos(cs, key, key + char_length_key, char_length); set_if_smaller(char_length_key, seg->length); - char_length_rec= my_charpos(cs, pos +2 , pos + 2 + char_length_rec, + char_length_rec= my_charpos(cs, pos, pos + char_length_rec, char_length); set_if_smaller(char_length_rec, seg->length); } - if (cs->coll->strnncollsp(seg->charset, - (uchar*) pos+2, char_length_rec, - (uchar*) key+2, char_length_key, 0)) + (uchar*) pos, char_length_rec, + (uchar*) key, char_length_key, 0)) return 1; } else @@ -638,6 +667,8 @@ void hp_make_key(HP_KEYDEF *keydef, byte *key, const byte *rec) char_length / cs->mbmaxlen); set_if_smaller(char_length, seg->length); /* QQ: ok to remove? */ } + if (seg->type == HA_KEYTYPE_VARTEXT1) + char_length+= seg->bit_start; /* Copy also length */ memcpy(key,rec+seg->start,(size_t) char_length); key+= char_length; } @@ -707,11 +738,13 @@ uint hp_rb_make_key(HP_KEYDEF *keydef, byte *key, { uchar *pos= (uchar*) rec + seg->start; uint length= seg->length; - uint tmp_length= uint2korr(pos); + uint pack_length= seg->bit_start; + uint tmp_length= (pack_length == 1 ? 
(uint) *(uchar*) pos : + uint2korr(pos)); CHARSET_INFO *cs= seg->charset; char_length= length/cs->mbmaxlen; - pos+=2; /* Skip VARCHAR length */ + pos+= pack_length; /* Skip VARCHAR length */ set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); diff --git a/heap/hp_open.c b/heap/hp_open.c index 1fa832208fb..fd937229b0d 100644 --- a/heap/hp_open.c +++ b/heap/hp_open.c @@ -63,7 +63,7 @@ HP_INFO *heap_open(const char *name, int mode) #ifndef DBUG_OFF info->opt_flag= READ_CHECK_USED; /* Check when changing */ #endif - DBUG_PRINT("exit",("heap: %lx reclength: %d records_in_block: %d", + DBUG_PRINT("exit",("heap: 0x%lx reclength: %d records_in_block: %d", info,share->reclength,share->block.records_in_block)); DBUG_RETURN(info); } @@ -82,7 +82,7 @@ HP_SHARE *hp_find_named_heap(const char *name) info= (HP_SHARE*) pos->data; if (!strcmp(name, info->name)) { - DBUG_PRINT("exit", ("Old heap_database: %lx",info)); + DBUG_PRINT("exit", ("Old heap_database: 0x%lx",info)); DBUG_RETURN(info); } } diff --git a/heap/hp_rkey.c b/heap/hp_rkey.c index a88139bbdee..f5f22a877a1 100644 --- a/heap/hp_rkey.c +++ b/heap/hp_rkey.c @@ -23,7 +23,7 @@ int heap_rkey(HP_INFO *info, byte *record, int inx, const byte *key, HP_SHARE *share= info->s; HP_KEYDEF *keyinfo= share->keydef + inx; DBUG_ENTER("heap_rkey"); - DBUG_PRINT("enter",("base: %lx inx: %d",info,inx)); + DBUG_PRINT("enter",("base: 0x%lx inx: %d",info,inx)); if ((uint) inx >= share->keys) { diff --git a/heap/hp_rrnd.c b/heap/hp_rrnd.c index cce3ce24e51..4daa3a06377 100644 --- a/heap/hp_rrnd.c +++ b/heap/hp_rrnd.c @@ -29,7 +29,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); - DBUG_PRINT("enter",("info: %lx pos: %lx",info,pos)); + DBUG_PRINT("enter",("info: 0x%lx pos: %lx",info,pos)); info->lastinx= -1; if (!(info->current_ptr= pos)) @@ -44,7 +44,7 @@ int heap_rrnd(register HP_INFO *info, byte *record, byte *pos) } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); - DBUG_PRINT("exit",("found record at %lx",info->current_ptr)); + DBUG_PRINT("exit",("found record at 0x%lx",info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); } /* heap_rrnd */ @@ -64,7 +64,7 @@ int heap_rrnd_old(register HP_INFO *info, byte *record, ulong pos) { HP_SHARE *share=info->s; DBUG_ENTER("heap_rrnd"); - DBUG_PRINT("enter",("info: %lx pos: %ld",info,pos)); + DBUG_PRINT("enter",("info: 0x%lx pos: %ld",info,pos)); info->lastinx= -1; if (pos == (ulong) -1) @@ -98,7 +98,7 @@ end: } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); - DBUG_PRINT("exit",("found record at %lx",info->current_ptr)); + DBUG_PRINT("exit",("found record at 0x%lx",info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); } /* heap_rrnd */ diff --git a/heap/hp_write.c b/heap/hp_write.c index 577c52a007d..171998e9125 100644 --- a/heap/hp_write.c +++ b/heap/hp_write.c @@ -138,7 +138,7 @@ static byte *next_free_record_pos(HP_SHARE *info) pos=info->del_link; info->del_link= *((byte**) pos); info->deleted--; - DBUG_PRINT("exit",("Used old position: %lx",pos)); + DBUG_PRINT("exit",("Used old position: 0x%lx",pos)); DBUG_RETURN(pos); } if (!(block_pos=(info->records % info->block.records_in_block))) diff --git a/include/my_base.h b/include/my_base.h index 
88d3ec0b270..b300adc3adf 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -181,8 +181,12 @@ enum ha_base_keytype { HA_KEYTYPE_INT24=12, HA_KEYTYPE_UINT24=13, HA_KEYTYPE_INT8=14, - HA_KEYTYPE_VARTEXT=15, /* Key is sorted as letters */ - HA_KEYTYPE_VARBINARY=16 /* Key is sorted as unsigned chars */ + /* Varchar (0-255 bytes) with length packed with 1 byte */ + HA_KEYTYPE_VARTEXT1=15, /* Key is sorted as letters */ + HA_KEYTYPE_VARBINARY1=16, /* Key is sorted as unsigned chars */ + /* Varchar (0-65535 bytes) with length packed with 2 bytes */ + HA_KEYTYPE_VARTEXT2=17, /* Key is sorted as letters */ + HA_KEYTYPE_VARBINARY2=18 /* Key is sorted as unsigned chars */ }; #define HA_MAX_KEYTYPE 31 /* Must be log2-1 */ @@ -390,4 +394,6 @@ typedef ulong ha_rows; #define MAX_FILE_SIZE LONGLONG_MAX #endif +#define HA_VARCHAR_PACKLENGTH(field_length) ((field_length) < 256 ? 1 :2) + #endif /* _my_base_h */ diff --git a/myisam/ft_static.c b/myisam/ft_static.c index 3b186f7b179..cdb1580e706 100644 --- a/myisam/ft_static.c +++ b/myisam/ft_static.c @@ -25,9 +25,9 @@ char ft_boolean_syntax[]="+ -><()~*:\"\"&|"; const HA_KEYSEG ft_keysegs[FT_SEGS]={ { - HA_KEYTYPE_VARTEXT, /* type */ + HA_KEYTYPE_VARTEXT2, /* type */ 63, /* language (will be overwritten) */ - 0, 0, 0, /* null_bit, bit_start, bit_end */ + 0, 2, 0, /* null_bit, bit_start, bit_end */ HA_VAR_LENGTH_PART | HA_PACK_KEY, /* flag */ HA_FT_MAXBYTELEN, /* length */ HA_FT_WLEN, /* start */ diff --git a/myisam/ft_test1.c b/myisam/ft_test1.c index a92c85924de..14be9aa1e8c 100644 --- a/myisam/ft_test1.c +++ b/myisam/ft_test1.c @@ -79,24 +79,24 @@ static int run_test(const char *filename) recinfo[0].length= (extra_field == FIELD_BLOB ? 4 + mi_portable_sizeof_char_ptr : extra_length); if (extra_field == FIELD_VARCHAR) - recinfo[0].length+=2; + recinfo[0].length+= HA_VARCHAR_PACKLENGTH(extra_length); recinfo[1].type=key_field; recinfo[1].length= (key_field == FIELD_BLOB ? 4+mi_portable_sizeof_char_ptr : key_length); if (key_field == FIELD_VARCHAR) - recinfo[1].length+=2; + recinfo[1].length+= HA_VARCHAR_PACKLENGTH(key_length); /* Define a key over the first column */ keyinfo[0].seg=keyseg; keyinfo[0].keysegs=1; keyinfo[0].seg[0].type= key_type; - keyinfo[0].seg[0].flag= (key_field == FIELD_BLOB)?HA_BLOB_PART: - (key_field == FIELD_VARCHAR)?HA_VAR_LENGTH_PART:0; + keyinfo[0].seg[0].flag= (key_field == FIELD_BLOB) ? HA_BLOB_PART: + (key_field == FIELD_VARCHAR) ? HA_VAR_LENGTH_PART:0; keyinfo[0].seg[0].start=recinfo[0].length; keyinfo[0].seg[0].length=key_length; keyinfo[0].seg[0].null_bit= 0; keyinfo[0].seg[0].null_pos=0; - keyinfo[0].seg[0].language=MY_CHARSET_CURRENT; + keyinfo[0].seg[0].language= default_charset_info->number; keyinfo[0].flag = (no_fulltext?HA_PACK_KEY:HA_FULLTEXT); if (!silent) @@ -155,33 +155,42 @@ static int run_test(const char *filename) if (!silent) printf("- Reading rows with key\n"); for (i=0 ; i < NQUERIES ; i++) - { FT_DOCLIST *result; + { + FT_DOCLIST *result; result=ft_nlq_init_search(file,0,(char*) query[i],strlen(query[i]),1); - if(!result) { + if(!result) + { printf("Query %d: `%s' failed with errno %3d\n",i,query[i],my_errno); continue; } printf("Query %d: `%s'. Found: %d. 
Top five documents:\n",
-        i,query[i],result->ndocs);
-    for(j=0;j<5;j++) { double w; int err;
-      err=ft_nlq_read_next(result, read_record);
-      if(err==HA_ERR_END_OF_FILE) {
-        printf("No more matches!\n");
-        break;
-      } else if (err) {
-        printf("ft_read_next %d failed with errno %3d\n",j,my_errno);
-        break;
-      }
-      w=ft_nlq_get_relevance(result);
-      if(key_field == FIELD_VARCHAR) {
-        uint l;
-        char *p;
-        p=recinfo[0].length+read_record;
-        l=uint2korr(p);
-        printf("%10.7f: %.*s\n",w,(int) l,p+2);
-      } else
-        printf("%10.7f: %.*s\n",w,recinfo[1].length,
-               recinfo[0].length+read_record);
+           i,query[i],result->ndocs);
+    for (j=0;j<5;j++)
+    {
+      double w; int err;
+      err= ft_nlq_read_next(result, read_record);
+      if (err==HA_ERR_END_OF_FILE)
+      {
+        printf("No more matches!\n");
+        break;
+      }
+      else if (err)
+      {
+        printf("ft_read_next %d failed with errno %3d\n",j,my_errno);
+        break;
+      }
+      w=ft_nlq_get_relevance(result);
+      if (key_field == FIELD_VARCHAR)
+      {
+        uint l;
+        char *p;
+        p=recinfo[0].length+read_record;
+        l=uint2korr(p);
+        printf("%10.7f: %.*s\n",w,(int) l,p+2);
+      }
+      else
+        printf("%10.7f: %.*s\n",w,recinfo[1].length,
+               recinfo[0].length+read_record);
     }
     ft_nlq_close_search(result);
   }
@@ -215,9 +224,14 @@ void create_record(char *pos, int n)
   else if (recinfo[0].type == FIELD_VARCHAR)
   {
     uint tmp;
-    strnmov(pos+2,data[n].f0,keyinfo[0].seg[0].length);
-    tmp=strlen(pos+2);
-    int2store(pos,tmp);
+    /* -1 is here because pack_length is stored in seg->length */
+    uint pack_length= HA_VARCHAR_PACKLENGTH(keyinfo[0].seg[0].length-1);
+    strnmov(pos+pack_length,data[n].f0,keyinfo[0].seg[0].length);
+    tmp=strlen(pos+pack_length);
+    if (pack_length == 1)
+      *pos= (char) tmp;
+    else
+      int2store(pos,tmp);
     pos+=recinfo[0].length;
   }
   else
@@ -239,9 +253,14 @@ void create_record(char *pos, int n)
   else if (recinfo[1].type == FIELD_VARCHAR)
   {
     uint tmp;
-    strnmov(pos+2,data[n].f2,keyinfo[0].seg[0].length);
-    tmp=strlen(pos+2);
-    int2store(pos,tmp);
+    /* -1 is here because pack_length is stored in seg->length */
+    uint pack_length= HA_VARCHAR_PACKLENGTH(keyinfo[0].seg[0].length-1);
+    strnmov(pos+pack_length,data[n].f2,keyinfo[0].seg[0].length);
+    tmp=strlen(pos+pack_length);
+    if (pack_length == 1)
+      *pos= (char) tmp;
+    else
+      int2store(pos,tmp);
     pos+=recinfo[1].length;
   }
   else
diff --git a/myisam/ft_update.c b/myisam/ft_update.c
index 8dafefe77a8..b8cd925bf4f 100644
--- a/myisam/ft_update.c
+++ b/myisam/ft_update.c
@@ -58,29 +58,27 @@ uint _mi_ft_segiterator(register FT_SEG_ITERATOR *ftsi)
   DBUG_ENTER("_mi_ft_segiterator");

   if (!ftsi->num)
-  {
     DBUG_RETURN(0);
-  }
-  else
-    ftsi->num--;
+
+  ftsi->num--;

   if (!ftsi->seg)
-  {
     DBUG_RETURN(1);
-  }
-  else
-    ftsi->seg--;
+
+  ftsi->seg--;

   if (ftsi->seg->null_bit &&
       (ftsi->rec[ftsi->seg->null_pos] & ftsi->seg->null_bit))
   {
-      ftsi->pos=0;
-      DBUG_RETURN(1);
+    ftsi->pos=0;
+    DBUG_RETURN(1);
   }
   ftsi->pos= ftsi->rec+ftsi->seg->start;
   if (ftsi->seg->flag & HA_VAR_LENGTH_PART)
   {
-    ftsi->len=uint2korr(ftsi->pos);
-    ftsi->pos+=2;                          /* Skip VARCHAR length */
+    uint pack_length= (ftsi->seg->bit_start);
+    ftsi->len= (pack_length == 1 ?
(uint) *(uchar*) ftsi->pos : + uint2korr(ftsi->pos)); + ftsi->pos+= pack_length; /* Skip VARCHAR length */ DBUG_RETURN(1); } if (ftsi->seg->flag & HA_BLOB_PART) @@ -296,9 +294,11 @@ uint _ft_make_key(MI_INFO *info, uint keynr, byte *keybuf, FT_WORD *wptr, DBUG_RETURN(_mi_make_key(info,keynr,(uchar*) keybuf,buf,filepos)); } + /* convert key value to ft2 */ + uint _mi_ft_convert_to_ft2(MI_INFO *info, uint keynr, uchar *key) { my_off_t root; @@ -316,9 +316,12 @@ uint _mi_ft_convert_to_ft2(MI_INFO *info, uint keynr, uchar *key) get_key_full_length_rdonly(key_length, key); while (_mi_ck_delete(info, keynr, key, key_length) == 0) - /* nothing to do here. - _mi_ck_delete() will populate info->ft1_to_ft2 with deleted keys - */; + { + /* + nothing to do here. + _mi_ck_delete() will populate info->ft1_to_ft2 with deleted keys + */ + } /* creating pageful of keys */ mi_putint(info->buff,length+2,0); diff --git a/myisam/mi_check.c b/myisam/mi_check.c index 112a371c9fe..b8f992dc21a 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -280,7 +280,8 @@ int chk_size(MI_CHECK *param, register MI_INFO *info) size=my_seek(info->s->kfile,0L,MY_SEEK_END,MYF(0)); if ((skr=(my_off_t) info->state->key_file_length) != size) { - if (skr > size) + /* Don't give error if file generated by myisampack */ + if (skr > size && info->s->state.key_map) { error=1; mi_check_print_error(param, diff --git a/myisam/mi_checksum.c b/myisam/mi_checksum.c index 95338434211..33a51068fb0 100644 --- a/myisam/mi_checksum.c +++ b/myisam/mi_checksum.c @@ -40,8 +40,12 @@ ha_checksum mi_checksum(MI_INFO *info, const byte *buf) } case FIELD_VARCHAR: { - length=uint2korr(buf); - pos=buf+2; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec->length-1); + if (pack_length == 1) + length= (ulong) *(uchar*) buf; + else + length= uint2korr(buf); + pos= buf+pack_length; break; } default: diff --git a/myisam/mi_create.c b/myisam/mi_create.c index e139997e0c7..0164555272d 100644 --- a/myisam/mi_create.c +++ b/myisam/mi_create.c @@ -43,7 +43,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, myf create_flag; uint fields,length,max_key_length,packed,pointer,real_length_diff, key_length,info_length,key_segs,options,min_key_length_skip, - base_pos,varchar_count,long_varchar_count,varchar_length, + base_pos,long_varchar_count,varchar_length, max_key_block_length,unique_key_parts,fulltext_keys,offset; ulong reclength, real_reclength,min_pack_length; char filename[FN_REFLEN],linkname[FN_REFLEN], *linkname_ptr; @@ -99,7 +99,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, /* Start by checking fields and field-types used */ - reclength=varchar_count=varchar_length=long_varchar_count=packed= + reclength=varchar_length=long_varchar_count=packed= min_pack_length=pack_reclength=0; for (rec=recinfo, fields=0 ; fields != columns ; @@ -130,14 +130,15 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } else if (type == FIELD_VARCHAR) { - varchar_count++; - varchar_length+=rec->length-2; + varchar_length+= rec->length-1; /* Used for min_pack_length */ packed--; - pack_reclength+=1; - if (test(rec->length > 257)) - { /* May be packed on 3 bytes */ + pack_reclength++; + min_pack_length++; + /* We must test for 257 as length includes pack-length */ + if (test(rec->length >= 257)) + { long_varchar_count++; - pack_reclength+=2; + pack_reclength+= 2; /* May be packed on 3 bytes */ } } else if (type != FIELD_SKIP_ZERO) @@ -169,12 +170,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, /* We can't use checksum with 
static length rows */ if (!(options & HA_OPTION_PACK_RECORD)) options&= ~HA_OPTION_CHECKSUM; - if (options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) - min_pack_length+=varchar_count; /* Min length to pack */ - else - { - min_pack_length+=varchar_length+2*varchar_count; - } + if (!(options & (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD))) + min_pack_length+= varchar_length; if (flags & HA_CREATE_TMP_TABLE) options|= HA_OPTION_TMP_TABLE; if (flags & HA_CREATE_CHECKSUM || (options & HA_OPTION_CHECKSUM)) @@ -220,7 +217,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, reclength=pointer+1; /* reserve place for delete link */ } else - reclength+=long_varchar_count; /* We need space for this! */ + reclength+= long_varchar_count; /* We need space for varchar! */ max_key_length=0; tot_length=0 ; key_segs=0; fulltext_keys=0; @@ -261,7 +258,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, j++, keyseg++) { if (keyseg->type != HA_KEYTYPE_BINARY && - keyseg->type != HA_KEYTYPE_VARBINARY) + keyseg->type != HA_KEYTYPE_VARBINARY1 && + keyseg->type != HA_KEYTYPE_VARBINARY2) { my_errno=HA_WRONG_CREATE_OPTION; goto err; @@ -285,11 +283,22 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, j++, keyseg++) { if (keyseg->type != HA_KEYTYPE_TEXT && - keyseg->type != HA_KEYTYPE_VARTEXT) + keyseg->type != HA_KEYTYPE_VARTEXT1 && + keyseg->type != HA_KEYTYPE_VARTEXT2) { my_errno=HA_WRONG_CREATE_OPTION; goto err; } + if (!(keyseg->flag & HA_BLOB_PART) && + (keyseg->type == HA_KEYTYPE_VARTEXT1 || + keyseg->type == HA_KEYTYPE_VARTEXT2)) + { + /* Make a flag that this is a VARCHAR */ + keyseg->flag|= HA_VAR_LENGTH_PART; + /* Store in bit_start number of bytes used to pack the length */ + keyseg->bit_start= ((keyseg->type == HA_KEYTYPE_VARTEXT1)? + 1 : 2); + } } fulltext_keys++; @@ -345,10 +354,19 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, case HA_KEYTYPE_INT8: keyseg->flag|= HA_SWAP_KEY; break; - case HA_KEYTYPE_VARTEXT: - case HA_KEYTYPE_VARBINARY: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: if (!(keyseg->flag & HA_BLOB_PART)) + { + /* Make a flag that this is a VARCHAR */ keyseg->flag|= HA_VAR_LENGTH_PART; + /* Store in bit_start number of bytes used to pack the length */ + keyseg->bit_start= ((keyseg->type == HA_KEYTYPE_VARTEXT1 || + keyseg->type == HA_KEYTYPE_VARBINARY1) ? 
+ 1 : 2); + } break; default: break; @@ -368,6 +386,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } if (keyseg->flag & (HA_VAR_LENGTH_PART | HA_BLOB_PART)) { + DBUG_ASSERT(!test_all_bits(keyseg->flag, + (HA_VAR_LENGTH_PART | HA_BLOB_PART))); keydef->flag|=HA_VAR_LENGTH_KEY; length++; /* At least one length byte */ options|=HA_OPTION_PACK_KEYS; /* Using packed keys */ @@ -646,11 +666,31 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, /* Save unique definition */ for (i=0 ; i < share.state.header.uniques ; i++) { + HA_KEYSEG *keyseg_end; + keyseg= uniquedefs[i].seg; if (mi_uniquedef_write(file, &uniquedefs[i])) goto err; - for (j=0 ; j < uniquedefs[i].keysegs ; j++) + for (keyseg= uniquedefs[i].seg, keyseg_end= keyseg+ uniquedefs[i].keysegs; + keyseg < keyseg_end; + keyseg++) { - if (mi_keyseg_write(file, &uniquedefs[i].seg[j])) + switch (keyseg->type) { + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: + if (!(keyseg->flag & HA_BLOB_PART)) + { + keyseg->flag|= HA_VAR_LENGTH_PART; + keyseg->bit_start= ((keyseg->type == HA_KEYTYPE_VARTEXT1 || + keyseg->type == HA_KEYTYPE_VARBINARY1) ? + 1 : 2); + } + break; + default: + break; + } + if (mi_keyseg_write(file, keyseg)) goto err; } } diff --git a/myisam/mi_dbug.c b/myisam/mi_dbug.c index 02d1c7d05d6..531d0b9ddba 100644 --- a/myisam/mi_dbug.c +++ b/myisam/mi_dbug.c @@ -132,8 +132,10 @@ void _mi_print_key(FILE *stream, register HA_KEYSEG *keyseg, break; } #endif - case HA_KEYTYPE_VARTEXT: /* VARCHAR and TEXT */ - case HA_KEYTYPE_VARBINARY: /* VARBINARY and BLOB */ + case HA_KEYTYPE_VARTEXT1: /* VARCHAR and TEXT */ + case HA_KEYTYPE_VARTEXT2: /* VARCHAR and TEXT */ + case HA_KEYTYPE_VARBINARY1: /* VARBINARY and BLOB */ + case HA_KEYTYPE_VARBINARY2: /* VARBINARY and BLOB */ { uint tmp_length; get_key_length(tmp_length,key); diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c index 0b8d3c97872..9d8e161b8fe 100644 --- a/myisam/mi_dynrec.c +++ b/myisam/mi_dynrec.c @@ -768,11 +768,21 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from) } else if (type == FIELD_VARCHAR) { - uint tmp_length=uint2korr(from); - store_key_length_inc(to,tmp_length); - memcpy(to,from+2,tmp_length); - to+=tmp_length; - continue; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec->length -1); + uint tmp_length; + if (pack_length == 1) + { + tmp_length= (uint) *(uchar*) from; + *to++= *from; + } + else + { + tmp_length= uint2korr(from); + store_key_length_inc(to,tmp_length); + } + memcpy(to, from+pack_length,tmp_length); + to+= tmp_length; + continue; } else { @@ -878,9 +888,20 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, } else if (type == FIELD_VARCHAR) { - uint tmp_length=uint2korr(record); - to+=get_pack_length(tmp_length)+tmp_length; - continue; + uint pack_length= HA_VARCHAR_PACKLENGTH(rec->length -1); + uint tmp_length; + if (pack_length == 1) + { + tmp_length= (uint) *(uchar*) record; + to+= 1+ tmp_length; + continue; + } + else + { + tmp_length= uint2korr(record); + to+= get_pack_length(tmp_length)+tmp_length; + } + continue; } else { @@ -894,9 +915,7 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff, } } else - { - to+=length; - } + to+= length; } if (packed_length != (uint) (to - rec_buff) + test(info->s->calc_checksum) || (bit != 1 && (flag & ~(bit - 1)))) @@ -947,13 +966,27 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from, { if (type == FIELD_VARCHAR) { - 
get_key_length(length,from);
-      if (length > rec_length-2)
-        goto err;
-      int2store(to,length);
-      memcpy(to+2,from,length);
-      from+=length;
-      continue;
+      uint pack_length= HA_VARCHAR_PACKLENGTH(rec_length-1);
+      if (pack_length == 1)
+      {
+        length= (uint) *(uchar*) from;
+        if (length > rec_length-1)
+          goto err;
+        *to= *from++;
+      }
+      else
+      {
+        get_key_length(length, from);
+        if (length > rec_length-2)
+          goto err;
+        int2store(to,length);
+      }
+      if (from+length > from_end)
+        goto err;
+      memcpy(to+pack_length, from, length);
+      from+= length;
+      min_pack_length--;
+      continue;
     }
     if (flag & bit)
     {
@@ -1021,15 +1054,17 @@ ulong _mi_rec_unpack(register MI_INFO *info, register byte *to, byte *from,
       if (min_pack_length > (uint) (from_end - from))
         goto err;
       min_pack_length-=rec_length;
-      memcpy(to,(byte*) from,(size_t) rec_length); from+=rec_length;
+      memcpy(to, (byte*) from, (size_t) rec_length);
+      from+=rec_length;
     }
   }
   if (info->s->calc_checksum)
     from++;
   if (to == to_end && from == from_end && (bit == 1 || !(flag & ~(bit-1))))
     DBUG_RETURN(found_length);
+
 err:
-  my_errno=HA_ERR_RECORD_DELETED;
+  my_errno= HA_ERR_WRONG_IN_RECORD;
   DBUG_PRINT("error",("to_end: %lx -> %lx from_end: %lx -> %lx",
                       to,to_end,from,from_end));
   DBUG_DUMP("from",(byte*) info->rec_buff,info->s->base.min_pack_length);
diff --git a/myisam/mi_key.c b/myisam/mi_key.c
index a775e0ba2d0..900ced71acc 100644
--- a/myisam/mi_key.c
+++ b/myisam/mi_key.c
@@ -34,10 +34,20 @@ static int _mi_put_key_in_record(MI_INFO *info,uint keynr,byte *record);

-	/*
-	** Make a intern key from a record
-	** Ret: Length of key
-	*/
+/*
+  Make an internal key from a record
+
+  SYNOPSIS
+    _mi_make_key()
+    info	MyISAM handler
+    keynr	key number
+    key		Store created key here
+    record	Record
+    filepos	Position to record in the data file
+
+  RETURN
+    Length of key
+*/

 uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
                   const byte *record, my_off_t filepos)
@@ -104,8 +114,10 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
     }
     if (keyseg->flag & HA_VAR_LENGTH_PART)
     {
-      uint tmp_length=uint2korr(pos);
-      pos+=2;                              /* Skip VARCHAR length */
+      uint pack_length= keyseg->bit_start;
+      uint tmp_length= (pack_length == 1 ?
(uint) *(uchar*) pos : + uint2korr(pos)); + pos+= pack_length; /* Skip VARCHAR length */ set_if_smaller(length,tmp_length); FIX_LENGTH(cs, pos, length, char_length); store_key_length_inc(key,char_length); @@ -365,9 +377,12 @@ static int _mi_put_key_in_record(register MI_INFO *info, uint keynr, goto err; #endif /* Store key length */ - int2store(record+keyseg->start, length); + if (keyseg->bit_start == 1) + *(uchar*) (record+keyseg->start)= (uchar) length; + else + int2store(record+keyseg->start, length); /* And key data */ - memcpy(record+keyseg->start+2,(byte*) key, length); + memcpy(record+keyseg->start + keyseg->bit_start, (byte*) key, length); key+= length; } else if (keyseg->flag & HA_BLOB_PART) diff --git a/myisam/mi_open.c b/myisam/mi_open.c index 562227d2f03..0ebbd90a00f 100644 --- a/myisam/mi_open.c +++ b/myisam/mi_open.c @@ -314,7 +314,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) { disk_pos=mi_keyseg_read(disk_pos, pos); - if (pos->type == HA_KEYTYPE_TEXT || pos->type == HA_KEYTYPE_VARTEXT) + if (pos->type == HA_KEYTYPE_TEXT || + pos->type == HA_KEYTYPE_VARTEXT1 || + pos->type == HA_KEYTYPE_VARTEXT2) { if (!pos->language) pos->charset=default_charset_info; @@ -389,7 +391,9 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) for (j=0 ; j < share->uniqueinfo[i].keysegs; j++,pos++) { disk_pos=mi_keyseg_read(disk_pos, pos); - if (pos->type == HA_KEYTYPE_TEXT || pos->type == HA_KEYTYPE_VARTEXT) + if (pos->type == HA_KEYTYPE_TEXT || + pos->type == HA_KEYTYPE_VARTEXT1 || + pos->type == HA_KEYTYPE_VARTEXT2) { if (!pos->language) pos->charset=default_charset_info; diff --git a/myisam/mi_packrec.c b/myisam/mi_packrec.c index a277c2ca9d1..62d15c03266 100644 --- a/myisam/mi_packrec.c +++ b/myisam/mi_packrec.c @@ -91,8 +91,10 @@ static void uf_zero(MI_COLUMNDEF *rec,MI_BIT_BUFF *bit_buff, uchar *to,uchar *end); static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end); -static void uf_varchar(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, - uchar *to, uchar *end); +static void uf_varchar1(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + uchar *to, uchar *end); +static void uf_varchar2(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + uchar *to, uchar *end); static void decode_bytes(MI_COLUMNDEF *rec,MI_BIT_BUFF *bit_buff, uchar *to,uchar *end); static uint decode_pos(MI_BIT_BUFF *bit_buff,MI_DECODE_TREE *decode_tree); @@ -522,14 +524,16 @@ static void (*get_unpack_function(MI_COLUMNDEF *rec)) case FIELD_BLOB: return &uf_blob; case FIELD_VARCHAR: - return &uf_varchar; + if (rec->length <= 256) /* 255 + 1 byte length */ + return &uf_varchar1; + return &uf_varchar2; case FIELD_LAST: default: return 0; /* This should never happend */ } } - /* De different functions to unpack a field */ + /* The different functions to unpack a field */ static void uf_zerofill_skip_zero(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end) @@ -773,7 +777,22 @@ static void uf_blob(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, } } -static void uf_varchar(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + +static void uf_varchar1(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, + uchar *to, uchar *end __attribute__((unused))) +{ + if (get_bit(bit_buff)) + to[0]= 0; /* Zero lengths */ + else + { + ulong length=get_bits(bit_buff,rec->space_length_bits); + *to= (uchar) length; + decode_bytes(rec,bit_buff,to+1,to+1+length); + } +} + + +static void uf_varchar2(MI_COLUMNDEF *rec, MI_BIT_BUFF *bit_buff, uchar *to, uchar *end __attribute__((unused))) { if (get_bit(bit_buff)) diff --git 
a/myisam/mi_search.c b/myisam/mi_search.c
index 2fef70db9f0..2259dd17fcd 100644
--- a/myisam/mi_search.c
+++ b/myisam/mi_search.c
@@ -425,7 +425,8 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
       if (len < cmplen)
       {
         if ((keyinfo->seg->type != HA_KEYTYPE_TEXT &&
-             keyinfo->seg->type != HA_KEYTYPE_VARTEXT))
+             keyinfo->seg->type != HA_KEYTYPE_VARTEXT1 &&
+             keyinfo->seg->type != HA_KEYTYPE_VARTEXT2))
           my_flag= -1;
         else
         {
@@ -1371,7 +1372,8 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key,
   sort_order=0;
   if ((keyinfo->flag & HA_FULLTEXT) &&
       ((keyseg->type == HA_KEYTYPE_TEXT) ||
-       (keyseg->type == HA_KEYTYPE_VARTEXT)) &&
+       (keyseg->type == HA_KEYTYPE_VARTEXT1) ||
+       (keyseg->type == HA_KEYTYPE_VARTEXT2)) &&
       !use_strnxfrm(keyseg->charset))
     sort_order=keyseg->charset->sort_order;
diff --git a/myisam/mi_test1.c b/myisam/mi_test1.c
index 15ce7515ac2..aa6cd98ac8e 100644
--- a/myisam/mi_test1.c
+++ b/myisam/mi_test1.c
@@ -75,11 +75,11 @@ static int run_test(const char *filename)
   recinfo[1].length= (key_field == FIELD_BLOB ? 4+mi_portable_sizeof_char_ptr :
                       key_length);
   if (key_field == FIELD_VARCHAR)
-    recinfo[1].length+=2;
+    recinfo[1].length+= HA_VARCHAR_PACKLENGTH(key_length);
   recinfo[2].type=extra_field;
   recinfo[2].length= (extra_field == FIELD_BLOB ? 4 + mi_portable_sizeof_char_ptr : 24);
   if (extra_field == FIELD_VARCHAR)
-    recinfo[2].length+=2;
+    recinfo[2].length+= HA_VARCHAR_PACKLENGTH(recinfo[2].length);
   if (opt_unique)
   {
     recinfo[3].type=FIELD_CHECK;
@@ -88,6 +88,9 @@
   rec_length=recinfo[0].length+recinfo[1].length+recinfo[2].length+
     recinfo[3].length;
+  if (key_type == HA_KEYTYPE_VARTEXT1 &&
+      key_length > 255)
+    key_type= HA_KEYTYPE_VARTEXT2;

   /* Define a key over the first column */
   keyinfo[0].seg=keyseg;
@@ -330,7 +333,8 @@ static void create_key_part(char *key,uint rownr)
   {
     sprintf(key,"%*d",keyinfo[0].seg[0].length,rownr);
   }
-  else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT)
+  else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT1 ||
+           keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT2)
   {                                        /* Alpha record */
     /* Create a key that may be easily packed */
     bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B');
@@ -410,11 +414,14 @@ static void create_record(char *record,uint rownr)
   }
   else if (recinfo[1].type == FIELD_VARCHAR)
   {
-    uint tmp;
-    create_key_part(pos+2,rownr);
-    tmp=strlen(pos+2);
-    int2store(pos,tmp);
-    pos+=recinfo[1].length;
+    uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
+    create_key_part(pos+pack_length,rownr);
+    tmp= strlen(pos+pack_length);
+    if (pack_length == 1)
+      *(uchar*) pos= (uchar) tmp;
+    else
+      int2store(pos,tmp);
+    pos+= recinfo[1].length;
   }
   else
   {
@@ -434,10 +441,13 @@
   }
   else if (recinfo[2].type == FIELD_VARCHAR)
   {
-    uint tmp;
-    sprintf(pos+2,"... row: %d", rownr);
-    tmp=strlen(pos+2);
-    int2store(pos,tmp);
+    uint tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
+    sprintf(pos+pack_length, "... row: %d", rownr);
+    tmp= strlen(pos+pack_length);
+    if (pack_length == 1)
+      *(uchar*) pos= (uchar) tmp;
+    else
+      int2store(pos,tmp);
   }
   else
   {
@@ -466,8 +476,9 @@ static void update_record(char *record)
   }
   else if (recinfo[1].type == FIELD_VARCHAR)
   {
-    uint length=uint2korr(pos);
-    my_casedn(default_charset_info,pos+2,length);
+    uint pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
+    uint length= pack_length == 1 ?
(uint) *(uchar*) pos : uint2korr(pos); + my_casedn(default_charset_info,pos+pack_length,length); pos+=recinfo[1].length; } else @@ -493,10 +504,14 @@ static void update_record(char *record) else if (recinfo[2].type == FIELD_VARCHAR) { /* Second field is longer than 10 characters */ - uint length=uint2korr(pos); - bfill(pos+2+length,recinfo[2].length-length-2,'.'); - length=recinfo[2].length-2; - int2store(pos,length); + uint pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1); + uint length= pack_length == 1 ? (uint) *(uchar*) pos : uint2korr(pos); + bfill(pos+pack_length+length,recinfo[2].length-length-pack_length,'.'); + length=recinfo[2].length-pack_length; + if (pack_length == 1) + *(uchar*) pos= (uchar) length; + else + int2store(pos,length); } else { @@ -519,7 +534,7 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"insert_rows", 'i', "Undocumented", (gptr*) &insert_count, (gptr*) &insert_count, 0, GET_UINT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0}, - {"key_alpha", 'a', "Undocumented", + {"key_alpha", 'a', "Use a key of type HA_KEYTYPE_TEXT", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"key_binary_pack", 'B', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -535,9 +550,9 @@ static struct my_option my_long_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"key_space_pack", 'p', "Undocumented", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"key_varchar", 'w', "Undocumented", + {"key_varchar", 'w', "Test VARCHAR keys", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"null_fields", 'N', "Undocumented", + {"null_fields", 'N', "Define fields with NULL", (gptr*) &null_fields, (gptr*) &null_fields, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"row_fixed_size", 'S', "Undocumented", @@ -604,7 +619,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), key_field=FIELD_BLOB; /* blob key */ extra_field= FIELD_BLOB; pack_seg|= HA_BLOB_PART; - key_type= HA_KEYTYPE_VARTEXT; + key_type= HA_KEYTYPE_VARTEXT1; break; case 'k': if (key_length < 4 || key_length > MI_MAX_KEY_LENGTH) @@ -616,11 +631,11 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case 'w': key_field=FIELD_VARCHAR; /* varchar keys */ extra_field= FIELD_VARCHAR; - key_type= HA_KEYTYPE_VARTEXT; + key_type= HA_KEYTYPE_VARTEXT1; pack_seg|= HA_VAR_LENGTH_PART; create_flag|= HA_PACK_RECORD; break; - case 'K': /* Use key cacheing */ + case 'K': /* Use key cacheing */ key_cacheing=1; break; case 'V': diff --git a/myisam/mi_test3.c b/myisam/mi_test3.c index 27d23317b5c..be4277cc65c 100644 --- a/myisam/mi_test3.c +++ b/myisam/mi_test3.c @@ -67,6 +67,7 @@ int main(int argc,char **argv) bzero((char*) keyinfo,sizeof(keyinfo)); bzero((char*) recinfo,sizeof(recinfo)); + bzero((char*) keyseg,sizeof(keyseg)); keyinfo[0].seg= &keyseg[0][0]; keyinfo[0].seg[0].start=0; keyinfo[0].seg[0].length=8; diff --git a/myisam/mi_test_all.res b/myisam/mi_test_all.res index 94355bf1aa2..16b517d3f76 100644 --- a/myisam/mi_test_all.res +++ b/myisam/mi_test_all.res @@ -1,3 +1,6 @@ +myisamchk: MyISAM file test1 +myisamchk: warning: Size of indexfile is: 1024 Should be: 2048 +MyISAM-table 'test1' is usable but should be fixed mi_test2 -s -L -K -R1 -m2000 ; Should give error 135 Error: 135 in write at record: 1105 got error: 135 when using MyISAM-database @@ -5,46 +8,46 @@ myisamchk: MyISAM file test2 myisamchk: warning: Datafile is almost full, 65532 of 65534 used MyISAM-table 'test2' is usable but should be fixed Commands Used 
count Errors Recover errors -open 17 0 0 -write 850 0 0 -update 85 0 0 -delete 850 0 0 -close 17 0 0 -extra 102 0 0 -Total 1921 0 0 +open 1 0 0 +write 50 0 0 +update 5 0 0 +delete 50 0 0 +close 1 0 0 +extra 6 0 0 +Total 113 0 0 Commands Used count Errors Recover errors -open 18 0 0 -write 900 0 0 -update 90 0 0 -delete 900 0 0 -close 18 0 0 -extra 108 0 0 -Total 2034 0 0 +open 2 0 0 +write 100 0 0 +update 10 0 0 +delete 100 0 0 +close 2 0 0 +extra 12 0 0 +Total 226 0 0 -real 0m1.054s -user 0m0.410s -sys 0m0.640s +real 0m0.791s +user 0m0.137s +sys 0m0.117s -real 0m1.077s -user 0m0.550s -sys 0m0.530s +real 0m0.659s +user 0m0.252s +sys 0m0.102s -real 0m1.100s -user 0m0.420s -sys 0m0.680s +real 0m0.571s +user 0m0.188s +sys 0m0.098s -real 0m0.783s -user 0m0.590s -sys 0m0.200s +real 0m1.111s +user 0m0.236s +sys 0m0.037s -real 0m0.764s -user 0m0.560s -sys 0m0.210s +real 0m0.621s +user 0m0.242s +sys 0m0.022s -real 0m0.699s -user 0m0.570s -sys 0m0.130s +real 0m0.698s +user 0m0.248s +sys 0m0.021s -real 0m0.991s -user 0m0.630s -sys 0m0.350s +real 0m0.683s +user 0m0.265s +sys 0m0.079s diff --git a/myisam/mi_unique.c b/myisam/mi_unique.c index c03182456df..f2d5f01be25 100644 --- a/myisam/mi_unique.c +++ b/myisam/mi_unique.c @@ -95,8 +95,10 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) pos= record+keyseg->start; if (keyseg->flag & HA_VAR_LENGTH_PART) { - uint tmp_length=uint2korr(pos); - pos+=2; /* Skip VARCHAR length */ + uint pack_length= keyseg->bit_start; + uint tmp_length= (pack_length == 1 ? (uint) *(uchar*) pos : + uint2korr(pos)); + pos+= pack_length; /* Skip VARCHAR length */ set_if_smaller(length,tmp_length); } else if (keyseg->flag & HA_BLOB_PART) @@ -107,7 +109,8 @@ ha_checksum mi_unique_hash(MI_UNIQUEDEF *def, const byte *record) length=tmp_length; /* The whole blob */ } end= pos+length; - if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT) + if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 || + type == HA_KEYTYPE_VARTEXT2) { keyseg->charset->coll->hash_sort(keyseg->charset, (const uchar*) pos, length, &seed1, @@ -157,12 +160,21 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, pos_b= b+keyseg->start; if (keyseg->flag & HA_VAR_LENGTH_PART) { - a_length= uint2korr(pos_a); - b_length= uint2korr(pos_b); - pos_a+= 2; /* Skip VARCHAR length */ - pos_b+= 2; - set_if_smaller(a_length, keyseg->length); - set_if_smaller(b_length, keyseg->length); + uint pack_length= keyseg->bit_start; + if (pack_length == 1) + { + a_length= (uint) *(uchar*) pos_a++; + b_length= (uint) *(uchar*) pos_b++; + } + else + { + a_length= uint2korr(pos_a); + b_length= uint2korr(pos_b); + pos_a+= 2; /* Skip VARCHAR length */ + pos_b+= 2; + } + set_if_smaller(a_length, keyseg->length); /* Safety */ + set_if_smaller(b_length, keyseg->length); /* safety */ } else if (keyseg->flag & HA_BLOB_PART) { @@ -182,7 +194,8 @@ int mi_unique_comp(MI_UNIQUEDEF *def, const byte *a, const byte *b, memcpy_fixed((byte*) &pos_a,pos_a+keyseg->bit_start,sizeof(char*)); memcpy_fixed((byte*) &pos_b,pos_b+keyseg->bit_start,sizeof(char*)); } - if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT) + if (type == HA_KEYTYPE_TEXT || type == HA_KEYTYPE_VARTEXT1 || + type == HA_KEYTYPE_VARTEXT2) { if (mi_compare_text(keyseg->charset, (uchar *) pos_a, a_length, (uchar *) pos_b, b_length, 0, 1)) diff --git a/myisam/myisampack.c b/myisam/myisampack.c index cc520847f70..bda620a594a 100644 --- a/myisam/myisampack.c +++ b/myisam/myisampack.c @@ -849,9 +849,11 @@ static int 
get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
       }
       else if (count->field_type == FIELD_VARCHAR)
       {
-        length=uint2korr(start_pos);
-        pos=start_pos+2;
-        end_pos=start_pos+length;
+        uint pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1);
+        length= (pack_length == 1 ? (uint) *(uchar*) start_pos :
+                 uint2korr(start_pos));
+        pos= start_pos+pack_length;
+        end_pos= pos+length;
         set_if_bigger(count->max_length,length);
       }
       if (count->field_length <= 8 &&
@@ -1833,17 +1835,19 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts)
       }
       case FIELD_VARCHAR:
       {
-        ulong col_length= uint2korr(start_pos);
+        uint pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1);
+        ulong col_length= (pack_length == 1 ? (uint) *(uchar*) start_pos :
+                           uint2korr(start_pos));
         if (!col_length)
         {
           write_bits(1,1);                 /* Empty varchar */
         }
         else
        {
-          byte *end=start_pos+2+col_length;
+          byte *end=start_pos+pack_length+col_length;
           write_bits(0,1);
           write_bits(col_length,count->length_bits);
-          for (start_pos+=2 ; start_pos < end ; start_pos++)
+          for (start_pos+=pack_length ; start_pos < end ; start_pos++)
             write_bits(tree->code[(uchar) *start_pos],
                        (uint) tree->code_len[(uchar) *start_pos]);
         }
diff --git a/mysql-test/include/varchar.inc b/mysql-test/include/varchar.inc
index 6c9b62065c5..32140bc7146 100644
--- a/mysql-test/include/varchar.inc
+++ b/mysql-test/include/varchar.inc
@@ -92,6 +92,66 @@ select sql_big_result c,count(t) from t1 group by c limit 10;
 select t,count(*) from t1 group by t limit 10;
 select t,count(t) from t1 group by t limit 10;
 select sql_big_result t,count(t) from t1 group by t limit 10;
+
+#
+# Test varchar > 255 bytes
+#
+
+alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v);
+show create table t1;
+select count(*) from t1 where v='a';
+select count(*) from t1 where v='a ';
+select count(*) from t1 where v between 'a' and 'a ';
+select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n';
+select count(*) from t1 where v like 'a%';
+select count(*) from t1 where v like 'a %';
+explain select count(*) from t1 where v='a ';
+explain select count(*) from t1 where v like 'a%';
+explain select count(*) from t1 where v between 'a' and 'a ';
+explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n';
+explain select * from t1 where v='a';
+
+# GROUP BY
+
+select v,count(*) from t1 group by v limit 10;
+select v,count(t) from t1 group by v limit 10;
+select sql_big_result v,count(t) from t1 group by v limit 10;
+
+#
+# Test varchar > 255 bytes, key < 255
+#
+
+alter table t1 drop key v, add key v (v(30));
+show create table t1;
+select count(*) from t1 where v='a';
+select count(*) from t1 where v='a ';
+select count(*) from t1 where v between 'a' and 'a ';
+select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n';
+select count(*) from t1 where v like 'a%';
+select count(*) from t1 where v like 'a %';
+explain select count(*) from t1 where v='a ';
+explain select count(*) from t1 where v like 'a%';
+explain select count(*) from t1 where v between 'a' and 'a ';
+explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n';
+explain select * from t1 where v='a';
+
+# GROUP BY
+
+select v,count(*) from t1 group by v limit 10;
+select v,count(t) from t1 group by v limit 10;
+select sql_big_result v,count(t) from t1 group by v limit 10;
+
+#
+# Test varchar > 512 (special case for GROUP BY because of
+# CONVERT_IF_BIGGER_TO_BLOB define)
+#
+
+alter table
t1 modify v varchar(600), drop key v, add key v (v); +show create table t1; +select v,count(*) from t1 group by v limit 10; +select v,count(t) from t1 group by v limit 10; +select sql_big_result v,count(t) from t1 group by v limit 10; + drop table t1; # diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index 3570f74065e..337cc570298 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -1578,6 +1578,217 @@ f 10 g 10 h 10 i 10 +alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v); +Warnings: +Warning 1071 Specified key was too long; max key length is 255 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(255)) +) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 258 const 10 Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 258 NULL 11 Using where +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 258 NULL 10 Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 258 NULL 10 Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 258 const 10 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 drop key v, add key v (v(30)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(30)) +) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 10 Using where +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 11 Using where +explain select count(*) from t1 where v between 'a' and 'a 
'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 10 Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 10 Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 10 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 modify v varchar(600), drop key v, add key v (v); +Warnings: +Warning 1071 Specified key was too long; max key length is 255 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(600) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(255)) +) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1 +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 drop table t1; create table t1 (a char(10), unique (a)); insert into t1 values ('a '); diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index ab6e180e6b7..21627d5c267 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -469,7 +469,7 @@ character_sets CREATE TEMPORARY TABLE `character_sets` ( `DESCRIPTION` varchar(60) NOT NULL default '', `DEFAULT_COLLATE_NAME` varchar(60) NOT NULL default '', `MAXLEN` bigint(3) NOT NULL default '0' -) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2252 +) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2267 set names latin2; SHOW CREATE TABLE INFORMATION_SCHEMA.character_sets; Table Create Table @@ -478,7 +478,7 @@ character_sets CREATE TEMPORARY TABLE `character_sets` ( `DESCRIPTION` varchar(60) NOT NULL default '', `DEFAULT_COLLATE_NAME` varchar(60) NOT NULL default '', `MAXLEN` bigint(3) NOT NULL default '0' -) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2252 +) ENGINE=HEAP DEFAULT CHARSET=utf8 MAX_ROWS=2267 set names latin1; create table t1 select * from information_schema.CHARACTER_SETS where CHARACTER_SET_NAME like "latin1"; diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index 9a6c69b7bea..fe99961f964 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -1421,19 +1421,19 @@ insert t2 select * from t1; insert t3 select * from t1; checksum table t1, t2, t3, t4 quick; Table Checksum -test.t1 272226711 +test.t1 2948697075 test.t2 NULL test.t3 NULL test.t4 NULL checksum table t1, t2, t3, t4; Table Checksum -test.t1 272226711 +test.t1 2948697075 test.t2 968604391 test.t3 968604391 test.t4 NULL checksum table t1, t2, t3, t4 extended; Table Checksum -test.t1 272226711 +test.t1 3092701434 test.t2 968604391 test.t3 968604391 test.t4 NULL diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 0074b2672fc..98020f26e37 100644 --- 
a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -513,18 +513,18 @@ insert t1 values (1, "aaa", "bbb"), (NULL, "", "ccccc"), (0, NULL, ""); insert t2 select * from t1; checksum table t1, t2, t3 quick; Table Checksum -test.t1 272226711 +test.t1 2948697075 test.t2 NULL test.t3 NULL checksum table t1, t2, t3; Table Checksum -test.t1 272226711 -test.t2 272226711 +test.t1 2948697075 +test.t2 3092701434 test.t3 NULL checksum table t1, t2, t3 extended; Table Checksum -test.t1 272226711 -test.t2 272226711 +test.t1 3092701434 +test.t2 3092701434 test.t3 NULL drop table t1,t2; create table t1 (a int, key (a)); @@ -849,6 +849,213 @@ f 10 g 10 h 10 i 10 +alter table t1 modify v varchar(300), drop key v, drop key v_2, add key v (v); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const 7 Using where; Using index +explain select count(*) from t1 where v like 'a%'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL 8 Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL 7 Using where; Using index +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 303 NULL 7 Using where; Using index +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 303 const 7 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 drop key v, add key v (v(30)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(300) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`(30)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select count(*) from t1 where v='a'; +count(*) +10 +select count(*) from t1 where v='a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a '; +count(*) +10 +select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +count(*) +10 +select count(*) from t1 where v like 'a%'; +count(*) +11 +select count(*) from t1 where v like 'a %'; +count(*) +9 +explain select count(*) from t1 where v='a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 7 Using where +explain select count(*) from t1 where v like 'a%'; +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 8 Using where +explain select count(*) from t1 where v between 'a' and 'a '; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 7 Using where +explain select count(*) from t1 where v between 'a' and 'a ' and v between 'a ' and 'b\n'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 33 NULL 7 Using where +explain select * from t1 where v='a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 33 const 7 Using where +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +alter table t1 modify v varchar(600), drop key v, add key v (v); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `v` varchar(600) default NULL, + `c` char(10) default NULL, + `t` text, + KEY `c` (`c`), + KEY `t` (`t`(10)), + KEY `v` (`v`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +select v,count(*) from t1 group by v limit 10; +v count(*) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 +select sql_big_result v,count(t) from t1 group by v limit 10; +v count(t) +a 1 +a 10 +b 10 +c 10 +d 10 +e 10 +f 10 +g 10 +h 10 +i 10 drop table t1; create table t1 (a char(10), unique (a)); insert into t1 values ('a '); diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result index ef399b6662d..1665dcb655c 100644 --- a/mysql-test/r/ps_1general.result +++ b/mysql-test/r/ps_1general.result @@ -278,7 +278,7 @@ t2 MyISAM 9 Fixed 0 0 0 64424509439 1024 0 NULL # # # latin1_swedish_ci NULL prepare stmt4 from ' show table status from test like ''t9%'' '; execute stmt4; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t9 MyISAM 10 Dynamic 2 222 444 4294967295 2048 0 NULL # # # latin1_swedish_ci NULL +t9 MyISAM 10 Dynamic 2 220 440 4294967295 2048 0 NULL # # # latin1_swedish_ci NULL prepare stmt4 from ' show status like ''Threads_running'' '; execute stmt4; Variable_name Value diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index 01d82c0ba16..b319cf79ec2 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -824,7 +824,7 @@ select a from t1; drop table t1; # -# bug#2686 - index_merge select on BerkeleyDB table with varchar PK causes mysqld to crash +# bug#2686 - index_merge select on BerkeleyDB table with varchar PK crashes # create table t1( @@ -842,7 +842,8 @@ select substring(pk1, 1, 4), substring(pk1, 4001), drop table t1; # -# bug#2688 - Wrong index_merge query results for BDB table with variable length primary key +# bug#2688 - Wrong index_merge query results for BDB table with +# variable length primary key # create table t1 ( diff --git a/mysys/list.c b/mysys/list.c index 64fca10dc0b..c3cd6c94b9f 100644 --- a/mysys/list.c +++ b/mysys/list.c @@ -28,7 +28,7 @@ LIST *list_add(LIST *root, LIST *element) { DBUG_ENTER("list_add"); - DBUG_PRINT("enter",("root: 0x%lx element: %lx", root, element)); + 
DBUG_PRINT("enter",("root: 0x%lx element: 0x%lx", root, element)); if (root) { if (root->prev) /* If add in mid of list */ diff --git a/mysys/my_handler.c b/mysys/my_handler.c index cf8bde31e73..78e19b74245 100644 --- a/mysys/my_handler.c +++ b/mysys/my_handler.c @@ -206,7 +206,8 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, b+=length; } break; - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: { int a_length,b_length,pack_length; get_key_length(a_length,a); @@ -228,7 +229,8 @@ int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, break; } break; - case HA_KEYTYPE_VARBINARY: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: { int a_length,b_length,pack_length; get_key_length(a_length,a); diff --git a/mysys/raid.cc b/mysys/raid.cc index 0b688464fb3..1d2e0cb01f0 100644 --- a/mysys/raid.cc +++ b/mysys/raid.cc @@ -185,7 +185,7 @@ extern "C" { uint my_raid_write(File fd,const byte *Buffer, uint Count, myf MyFlags) { DBUG_ENTER("my_raid_write"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("enter",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d", fd, Buffer, Count, MyFlags)); if (is_raid(fd)) { @@ -198,7 +198,7 @@ extern "C" { uint my_raid_read(File fd, byte *Buffer, uint Count, myf MyFlags) { DBUG_ENTER("my_raid_read"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u MyFlags: %d", + DBUG_PRINT("enter",("Fd: %d Buffer: 0x%lx Count: %u MyFlags: %d", fd, Buffer, Count, MyFlags)); if (is_raid(fd)) { @@ -212,8 +212,9 @@ extern "C" { myf MyFlags) { DBUG_ENTER("my_raid_pread"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u offset: %u MyFlags: %d", - Filedes, Buffer, Count, offset, MyFlags)); + DBUG_PRINT("enter", + ("Fd: %d Buffer: 0x%lx Count: %u offset: %u MyFlags: %d", + Filedes, Buffer, Count, offset, MyFlags)); if (is_raid(Filedes)) { assert(offset != MY_FILEPOS_ERROR); @@ -231,8 +232,9 @@ extern "C" { my_off_t offset, myf MyFlags) { DBUG_ENTER("my_raid_pwrite"); - DBUG_PRINT("enter",("Fd: %d Buffer: %lx Count: %u offset: %u MyFlags: %d", - Filedes, Buffer, Count, offset, MyFlags)); + DBUG_PRINT("enter", + ("Fd: %d Buffer: 0x %lx Count: %u offset: %u MyFlags: %d", + Filedes, Buffer, Count, offset, MyFlags)); if (is_raid(Filedes)) { assert(offset != MY_FILEPOS_ERROR); diff --git a/sql/field.cc b/sql/field.cc index dafb3dc25da..c26120734c0 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4641,7 +4641,19 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table) } /**************************************************************************** -** VARCHAR type (Not available for the end user yet) + VARCHAR type + Data in field->ptr is stored as: + 1 or 2 bytes length-prefix-header (from Field_varstring::length_bytes) + data + + NOTE: + When VARCHAR is stored in a key (for handler::index_read() etc) it's always + stored with a 2 byte prefix. (Just like blob keys). + + Normally length_bytes is calculated as (field_length < 256 : 1 ? 2) + The exception is if there is a prefix key field that is part of a long + VARCHAR, in which case field_length for this may be 1 but the length_bytes + is 2. 
****************************************************************************/ @@ -4670,8 +4682,11 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) from,from+length, field_length/ field_charset->mbmaxlen); - memcpy(ptr + HA_KEY_BLOB_LENGTH, from, copy_length); - int2store(ptr, copy_length); + memcpy(ptr + length_bytes, from, copy_length); + if (length_bytes == 1) + *ptr= (uchar) copy_length; + else + int2store(ptr, copy_length); if (copy_length < length) error= 1; @@ -4684,91 +4699,117 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) int Field_varstring::store(longlong nr) { char buff[64]; - int l; - CHARSET_INFO *cs=charset(); - l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); - return Field_varstring::store(buff,(uint)l,cs); + uint length; + length= (uint) (field_charset->cset->longlong10_to_str)(field_charset, + buff, + sizeof(buff), + -10,nr); + return Field_varstring::store(buff, length, field_charset); } double Field_varstring::val_real(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntod(cs, ptr+HA_KEY_BLOB_LENGTH, length, (char**)0, ¬_used); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntod(field_charset, ptr+length_bytes, length, (char**) 0, + ¬_used); } longlong Field_varstring::val_int(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntoll(cs,ptr+HA_KEY_BLOB_LENGTH,length,10,NULL, ¬_used); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntoll(field_charset, ptr+length_bytes, length, 10, NULL, + ¬_used); } String *Field_varstring::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { - uint length=uint2korr(ptr); - val_ptr->set((const char*) ptr+HA_KEY_BLOB_LENGTH,length,field_charset); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + val_ptr->set((const char*) ptr+length_bytes, length, field_charset); return val_ptr; } int Field_varstring::cmp(const char *a_ptr, const char *b_ptr) { - uint a_length=uint2korr(a_ptr); - uint b_length=uint2korr(b_ptr); + uint a_length, b_length; int diff; + + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } diff= field_charset->coll->strnncollsp(field_charset, (const uchar*) a_ptr+ - HA_KEY_BLOB_LENGTH, + length_bytes, a_length, (const uchar*) b_ptr+ - HA_KEY_BLOB_LENGTH, + length_bytes, b_length,0); return diff; } +/* + NOTE: varstring and blob keys are ALWAYS stored with a 2 byte length prefix +*/ + int Field_varstring::key_cmp(const byte *key_ptr, uint max_key_length) { char *blob1; - uint length= uint2korr(ptr); - CHARSET_INFO *cs= charset(); - uint char_length= max_key_length / cs->mbmaxlen; + uint length= length_bytes == 1 ? 
(uint) (uchar) *ptr : uint2korr(ptr); + uint char_length= max_key_length / field_charset->mbmaxlen; - char_length= my_charpos(cs, ptr + HA_KEY_BLOB_LENGTH, - ptr + HA_KEY_BLOB_LENGTH + length, char_length); + char_length= my_charpos(field_charset, ptr + length_bytes, + ptr + length_bytes + length, char_length); set_if_smaller(length, char_length); - return cs->coll->strnncollsp(cs, - (const uchar*) ptr+2, length, - (const uchar*) key_ptr+HA_KEY_BLOB_LENGTH, - uint2korr(key_ptr), 0); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) ptr + length_bytes, + length, + (const uchar*) key_ptr+ + HA_KEY_BLOB_LENGTH, + uint2korr(key_ptr), 0); } +/* + Compare two key segments (always 2 byte length prefix) + + NOTE + This is used only to compare key segments created for index_read(). + (keys are created and compared in key.cc) +*/ + int Field_varstring::key_cmp(const byte *a,const byte *b) { - CHARSET_INFO *cs= charset(); - return cs->coll->strnncollsp(cs, - (const uchar*) a + HA_KEY_BLOB_LENGTH, - uint2korr(a), - (const uchar*) b + HA_KEY_BLOB_LENGTH, - uint2korr(b), - 0); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a + + HA_KEY_BLOB_LENGTH, + uint2korr(a), + (const uchar*) b + + HA_KEY_BLOB_LENGTH, + uint2korr(b), + 0); } void Field_varstring::sort_string(char *to,uint length) { - uint tot_length= uint2korr(ptr); + uint tot_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); tot_length= my_strnxfrm(field_charset, (uchar*) to, length, - (uchar*) ptr+HA_KEY_BLOB_LENGTH, + (uchar*) ptr + length_bytes, tot_length); if (tot_length < length) field_charset->cset->fill(field_charset, to+tot_length,length-tot_length, @@ -4776,6 +4817,18 @@ void Field_varstring::sort_string(char *to,uint length) } +enum ha_base_keytype Field_varstring::key_type() const +{ + enum ha_base_keytype res; + + if (binary()) + res= length_bytes == 1 ? HA_KEYTYPE_VARBINARY1 : HA_KEYTYPE_VARBINARY2; + else + res= length_bytes == 1 ? HA_KEYTYPE_VARTEXT1 : HA_KEYTYPE_VARTEXT2; + return res; +} + + void Field_varstring::sql_type(String &res) const { THD *thd= table->in_use; @@ -4793,13 +4846,102 @@ void Field_varstring::sql_type(String &res) const } +/* + Functions to create a packed row. + Here the number of length bytes depends on the given max_length +*/ + char *Field_varstring::pack(char *to, const char *from, uint max_length) { - uint length=uint2korr(from); + uint length= length_bytes == 1 ? (uint) (uchar) *from : uint2korr(from); set_if_smaller(max_length, field_length); if (length > max_length) length=max_length; *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, from+length_bytes, length); + return to+length; +} + + +char *Field_varstring::pack_key(char *to, const char *key, uint max_length) +{ + uint length= length_bytes == 1 ? (uint) (uchar) *key : uint2korr(key); + uint char_length= ((field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length); + key+= length_bytes; + if (length > char_length) + { + char_length= my_charpos(field_charset, key, key+length, char_length); + set_if_smaller(length, char_length); + } + *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, key, length); + return to+length; +} + + +/* + Unpack a key into a record buffer. + + SYNOPSIS + unpack_key() + to Pointer into the record buffer. + key Pointer to the packed key. + max_length Key length limit from key description.
+ + DESCRIPTION + A VARCHAR key has a maximum size of 64K-1. + In its packed form, the length field is one or two bytes long, + depending on 'max_length'. + + RETURN + Pointer to end of 'key' (To the next key part if multi-segment key) +*/ + +const char *Field_varstring::unpack_key(char *to, const char *key, + uint max_length) +{ + /* get length of the blob key */ + uint32 length= *((uchar*) key++); + if (max_length > 255) + length+= (*((uchar*) key++)) << 8; + + /* put the length into the record buffer */ + if (length_bytes == 1) + *ptr= (uchar) length; + else + int2store(ptr, length); + memcpy(ptr + length_bytes, key, length); + return key + length; +} + +/* + Create a packed key that will be used for storage in the index tree + + SYNOPSIS + pack_key_from_key_image() + to Store packed key segment here + from Key segment (as given to index_read()) + max_length Max length of key + + RETURN + end of key storage +*/ + +char *Field_varstring::pack_key_from_key_image(char *to, const char *from, + uint max_length) +{ + /* Key length is always stored as 2 bytes */ + uint length= uint2korr(from); + if (length > max_length) + length= max_length; + *to++= (char) (length & 255); if (max_length > 255) *to++= (char) (length >> 8); if (length) @@ -4808,34 +4950,15 @@ char *Field_varstring::pack(char *to, const char *from, uint max_length) } -char *Field_varstring::pack_key(char *to, const char *from, uint max_length) -{ - uint length=uint2korr(from); - uint char_length= ((field_charset->mbmaxlen > 1) ? - max_length/field_charset->mbmaxlen : max_length); - from+= HA_KEY_BLOB_LENGTH; - if (length > char_length) - { - char_length= my_charpos(field_charset, from, from+length, char_length); - set_if_smaller(length, char_length); - } - *to++= (char) (length & 255); - if (max_length > 255) - *to++= (char) (length >> 8); - if (length) - memcpy(to, from, length); - return to+length; -} - +/* + unpack field packed with Field_varstring::pack() +*/ const char *Field_varstring::unpack(char *to, const char *from) { uint length; - if (field_length <= 255) - { + if (length_bytes == 1) length= (uint) (uchar) (*to= *from++); - to[1]=0; - } else { length= uint2korr(from); @@ -4843,7 +4966,7 @@ const char *Field_varstring::unpack(char *to, const char *from) to[1]= *from++; } if (length) - memcpy(to+HA_KEY_BLOB_LENGTH, from, length); + memcpy(to+ length_bytes, from, length); return from+length; } @@ -4851,12 +4974,11 @@ const char *Field_varstring::unpack(char *to, const char *from) int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update) { - uint a_length; - uint b_length; + uint a_length, b_length; if (key_length > 255) { - a_length=uint2korr(a); a+= HA_KEY_BLOB_LENGTH; - b_length=uint2korr(b); b+= HA_KEY_BLOB_LENGTH; + a_length=uint2korr(a); a+= 2; + b_length=uint2korr(b); b+= 2; } else { @@ -4873,8 +4995,8 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length, int Field_varstring::pack_cmp(const char *b, uint key_length, my_bool insert_or_update) { - char *a= ptr+HA_KEY_BLOB_LENGTH; - uint a_length= uint2korr(ptr); + char *a= ptr+ length_bytes; + uint a_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); uint b_length; uint char_length= ((field_charset->mbmaxlen > 1) ? 
key_length / field_charset->mbmaxlen : key_length); @@ -4903,7 +5025,7 @@ int Field_varstring::pack_cmp(const char *b, uint key_length, uint Field_varstring::packed_col_length(const char *data_ptr, uint length) { if (length > 255) - return uint2korr(data_ptr)+HA_KEY_BLOB_LENGTH; + return uint2korr(data_ptr)+2; return (uint) ((uchar) *data_ptr)+1; } @@ -4916,13 +5038,14 @@ uint Field_varstring::max_packed_col_length(uint max_length) void Field_varstring::get_key_image(char *buff, uint length, imagetype type) { - uint f_length= uint2korr(ptr); + uint f_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); uint char_length= length / field_charset->mbmaxlen; - char_length= my_charpos(field_charset, ptr, ptr + HA_KEY_BLOB_LENGTH, + char_length= my_charpos(field_charset, ptr, ptr + length_bytes, char_length); set_if_smaller(f_length, char_length); + /* Key is always stored with 2 bytes */ int2store(buff,f_length); - memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+HA_KEY_BLOB_LENGTH, f_length); + memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+length_bytes, f_length); if (f_length < length) { /* @@ -4936,18 +5059,12 @@ void Field_varstring::get_key_image(char *buff, uint length, imagetype type) void Field_varstring::set_key_image(char *buff,uint length) { - length=uint2korr(buff); // Real length is here + length= uint2korr(buff); // Real length is here (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, field_charset); } -int Field_varstring::cmp_binary_offset(uint row_offset) -{ - return cmp_binary(ptr, ptr+row_offset); -} - - int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, uint32 max_length) { @@ -4955,13 +5072,41 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, uint diff; uint32 a_length,b_length; - a_length= uint2korr(a_ptr); - b_length= uint2korr(b_ptr); + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } set_if_smaller(a_length, max_length); set_if_smaller(b_length, max_length); if (a_length != b_length) return 1; - return memcmp(a_ptr+2, b_ptr+2, a_length); + return memcmp(a_ptr+length_bytes, b_ptr+length_bytes, a_length); +} + + +Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table) +{ + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); + if (res) + res->length_bytes= length_bytes; + return res; +} + + +Field *Field_varstring::new_key_field(MEM_ROOT *root, + struct st_table *new_table) +{ + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); + /* Keys length prefixes are always packed with 2 bytes */ + if (res) + res->length_bytes= 2; + return res; } @@ -5218,18 +5363,6 @@ int Field_blob::cmp(const char *a_ptr, const char *b_ptr) } -int Field_blob::cmp_offset(uint row_offset) -{ - return Field_blob::cmp(ptr,ptr+row_offset); -} - - -int Field_blob::cmp_binary_offset(uint row_offset) -{ - return cmp_binary(ptr, ptr+row_offset); -} - - int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, uint32 max_length) { @@ -5416,8 +5549,7 @@ const char *Field_blob::unpack(char *to, const char *from) int Field_blob::pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update) { - uint a_length; - uint b_length; + uint a_length, b_length; if (key_length > 255) { a_length=uint2korr(a); a+=2; @@ -5523,6 +5655,7 @@ const char *Field_blob::unpack_key(char *to, const char *from, uint max_length) return from + length; } + /* Create a 
packed key that will be used for storage from a MySQL key */ char *Field_blob::pack_key_from_key_image(char *to, const char *from, @@ -6047,15 +6180,17 @@ void create_field::create_length_to_internal_length(void) case MYSQL_TYPE_STRING: case MYSQL_TYPE_VARCHAR: length*= charset->mbmaxlen; - key_length*= charset->mbmaxlen; + key_length= length; pack_length= calc_pack_length(sql_type, length); break; case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: + /* Pack_length already calculated in sql_parse.cc */ length*= charset->mbmaxlen; + key_length= pack_length; break; default: - /* do nothing */ + key_length= pack_length= calc_pack_length(sql_type, length); break; } } @@ -6086,7 +6221,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: case FIELD_TYPE_DECIMAL: return (length); - case MYSQL_TYPE_VARCHAR: return (length+HA_KEY_BLOB_LENGTH); + case MYSQL_TYPE_VARCHAR: return (length + (length < 256 ? 1: 2)); case FIELD_TYPE_YEAR: case FIELD_TYPE_TINY : return 1; case FIELD_TYPE_SHORT : return 2; @@ -6166,7 +6301,9 @@ Field *make_field(char *ptr, uint32 field_length, unireg_check, field_name, table, field_charset); if (field_type == MYSQL_TYPE_VARCHAR) - return new Field_varstring(ptr,field_length,null_pos,null_bit, + return new Field_varstring(ptr,field_length, + HA_VARCHAR_PACKLENGTH(field_length), + null_pos,null_bit, unireg_check, field_name, table, field_charset); return 0; // Error @@ -6313,9 +6450,8 @@ create_field::create_field(Field *old_field,Field *orig_field) case MYSQL_TYPE_SET: case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: - /* These are corrected in create_length_to_internal_length */ + /* This is corrected in create_length_to_internal_length */ length= (length+charset->mbmaxlen-1) / charset->mbmaxlen; - key_length/= charset->mbmaxlen; break; #ifdef HAVE_SPATIAL case FIELD_TYPE_GEOMETRY: diff --git a/sql/field.h b/sql/field.h index 4353780f9a4..521bfb818b0 100644 --- a/sql/field.h +++ b/sql/field.h @@ -139,10 +139,9 @@ public: virtual int cmp(const char *,const char *)=0; virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L) { return memcmp(a,b,pack_length()); } - virtual int cmp_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } - virtual int cmp_binary_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } + int cmp_offset(uint row_offset) { return cmp(ptr,ptr+row_offset); } + int cmp_binary_offset(uint row_offset) + { return cmp_binary(ptr, ptr+row_offset); }; virtual int key_cmp(const byte *a,const byte *b) { return cmp((char*) a,(char*) b); } virtual int key_cmp(const byte *str, uint length) @@ -185,6 +184,10 @@ public: virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); + virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table) + { + return new_field(root, new_table); + } inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) { ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg; @@ -925,26 +928,31 @@ public: class Field_varstring :public Field_str { public: - Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, + /* Store number of bytes used to store length (1 or 2) */ + uint32 length_bytes; + Field_varstring(char *ptr_arg, + uint32 len_arg, uint length_bytes_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table 
*table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, cs) + unireg_check_arg, field_name_arg, table_arg, cs), + length_bytes(length_bytes_arg) {} Field_varstring(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs) + NONE, field_name_arg, table_arg, cs), + length_bytes(len_arg < 256 ? 1 :2) {} enum_field_types type() const { return MYSQL_TYPE_VARCHAR; } - enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + enum ha_base_keytype key_type() const; bool zero_pack() const { return 0; } - void reset(void) { bzero(ptr,field_length+2); } - uint32 pack_length() const { return (uint32) field_length+2; } + void reset(void) { bzero(ptr,field_length+length_bytes); } + uint32 pack_length() const { return (uint32) field_length+length_bytes; } uint32 key_length() const { return (uint32) field_length; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(longlong nr); @@ -959,12 +967,13 @@ public: void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); char *pack_key(char *to, const char *from, uint max_length); + char *pack_key_from_key_image(char* to, const char *from, uint max_length); const char *unpack(char* to, const char *from); + const char *unpack_key(char* to, const char *from, uint max_length); int pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update); int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint packed_col_length(const char *to, uint length); @@ -974,6 +983,8 @@ public: bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_VARSTRING; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table); }; @@ -996,7 +1007,7 @@ public: } enum_field_types type() const { return FIELD_TYPE_BLOB;} enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + { return binary() ? 
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); @@ -1005,9 +1016,7 @@ public: String *val_str(String*,String *); int cmp(const char *,const char*); int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length); - int cmp_offset(uint offset); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint32 key_length() const { return 0; } @@ -1054,9 +1063,9 @@ public: return 0; } char *pack(char *to, const char *from, uint max_length= ~(uint) 0); - const char *unpack(char *to, const char *from); char *pack_key(char *to, const char *from, uint max_length); char *pack_key_from_key_image(char* to, const char *from, uint max_length); + const char *unpack(char *to, const char *from); const char *unpack_key(char* to, const char *from, uint max_length); int pack_cmp(const char *a, const char *b, uint key_length, my_bool insert_or_update); @@ -1091,7 +1100,7 @@ public: :Field_blob(len_arg, maybe_null_arg, field_name_arg, table_arg, &my_charset_bin) { geom_type= geom_type_arg; } - enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; } enum_field_types type() const { return FIELD_TYPE_GEOMETRY; } void sql_type(String &str) const; int store(const char *to, uint length, CHARSET_INFO *charset); diff --git a/sql/field_conv.cc b/sql/field_conv.cc index f6cc851639a..1a175b95bdc 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -305,7 +305,8 @@ static void do_field_string(Copy_field *copy) char buff[MAX_FIELD_WIDTH]; copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); copy->from_field->val_str(©->tmp); - copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),copy->tmp.charset()); + copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(), + copy->tmp.charset()); } @@ -350,7 +351,23 @@ static void do_expand_string(Copy_field *copy) copy->to_length-copy->from_length, ' '); } -static void do_varstring(Copy_field *copy) + +static void do_varstring1(Copy_field *copy) +{ + uint length= (uint) *(uchar*) copy->from_ptr; + if (length > copy->to_length- 1) + { + length=copy->to_length - 1; + if (current_thd->count_cuted_fields) + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); + } + *(uchar*) copy->to_ptr= (uchar) length; + memcpy(copy->to_ptr+1, copy->from_ptr + 1, length); +} + + +static void do_varstring2(Copy_field *copy) { uint length=uint2korr(copy->from_ptr); if (length > copy->to_length- HA_KEY_BLOB_LENGTH) @@ -505,9 +522,15 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else if (to->charset() != from->charset()) return do_field_string; - else if (to->real_type() == MYSQL_TYPE_VARCHAR && to_length != - from_length) - return do_varstring; + else if (to->real_type() == MYSQL_TYPE_VARCHAR) + { + if (((Field_varstring*) to)->length_bytes != + ((Field_varstring*) from)->length_bytes) + return do_field_string; + if (to_length != from_length) + return (((Field_varstring*) to)->length_bytes == 1 ? 
+ do_varstring1 : do_varstring2); + } else if (to_length < from_length) return do_cut_string; else if (to_length > from_length) @@ -587,6 +610,12 @@ void field_conv(Field *to,Field *from) char buff[MAX_FIELD_WIDTH]; String result(buff,sizeof(buff),from->charset()); from->val_str(&result); + /* + We use c_ptr_quick() here to make it easier if 'to' is a float/double, + as the conversion routines will make a copy if the result doesn't + end with \0. Can be replaced with .ptr() when we have our own + string->double conversion. + */ to->store(result.c_ptr_quick(),result.length(),from->charset()); } else if (from->result_type() == REAL_RESULT) diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 6cb83624eff..322126ff47b 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -356,7 +356,8 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const } switch (table->key_info[idx].key_part[i].field->key_type()) { case HA_KEYTYPE_TEXT: - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: /* As BDB stores only one copy of equal strings, we can't use key read on these. Binary collations do support key read though. @@ -391,6 +392,7 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; uint key_length=new_key->size; + DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size); for (; key_part != end && (int) key_length > 0; key_part++) { int cmp; @@ -745,11 +747,11 @@ void ha_berkeley::unpack_row(char *record, DBT *row) void ha_berkeley::unpack_key(char *record, DBT *key, uint index) { - KEY *key_info=table->key_info+index; + KEY *key_info= table->key_info+index; KEY_PART_INFO *key_part= key_info->key_part, - *end=key_part+key_info->key_parts; + *end= key_part+key_info->key_parts; + char *pos= (char*) key->data; - char *pos=(char*) key->data; for (; key_part != end; key_part++) { if (key_part->null_bit) @@ -773,8 +775,10 @@ void ha_berkeley::unpack_key(char *record, DBT *key, uint index) /* - Create a packed key from from a row - This will never fail as the key buffer is pre allocated. + Create a packed key from a row. This key will be written as such + to the index tree. + + This will never fail as the key buffer is pre-allocated.
*/ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, @@ -820,7 +824,10 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, /* - Create a packed key from from a MySQL unpacked key + Create a packed key from a MySQL unpacked key (like the one that is + sent to index_read()). + + This key is to be used to read a row */ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, @@ -1457,7 +1464,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; DBUG_ENTER("index_read_idx"); current_row.flags=DB_DBT_REALLOC; active_index=MAX_KEY; @@ -1476,10 +1483,9 @@ int ha_berkeley::index_read(byte * buf, const byte * key, int error; KEY *key_info= &table->key_info[active_index]; int do_prev= 0; - DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; bzero((char*) &row,sizeof(row)); if (find_flag == HA_READ_BEFORE_KEY) { @@ -1679,6 +1685,7 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) pos+=key_part->field->packed_col_length((char*) pos,key_part->length); to->size= (uint) (pos- (byte*) to->data); } + DBUG_DUMP("key", (char*) to->data, to->size); return to; } diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 60555d51402..1556a18bfca 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -488,8 +488,10 @@ int ha_heap::create(const char *name, TABLE *table_arg, else { if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT && - seg->type != HA_KEYTYPE_VARTEXT && - seg->type != HA_KEYTYPE_VARBINARY) + seg->type != HA_KEYTYPE_VARTEXT1 && + seg->type != HA_KEYTYPE_VARTEXT2 && + seg->type != HA_KEYTYPE_VARBINARY1 && + seg->type != HA_KEYTYPE_VARBINARY2) seg->type= HA_KEYTYPE_BINARY; } seg->start= (uint) key_part->offset; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 726647cd131..dab3ea16377 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -926,8 +926,11 @@ int ha_myisam::enable_indexes(uint mode) { sql_print_warning("Warning: Enabling keys got errno %d, retrying", my_errno); + thd->clear_error(); param.testflag&= ~(T_REP_BY_SORT | T_QUICK); error= (repair(thd,param,0) != HA_ADMIN_OK); + if (!error && thd->net.report_error) + error= HA_ERR_CRASHED; } info(HA_STATUS_CONST); thd->proc_info=save_proc_info; @@ -1471,11 +1474,10 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, break; if (found->flags & BLOB_FLAG) - { recinfo_pos->type= (int) FIELD_BLOB; - } - else if (!(options & HA_OPTION_PACK_RECORD) || - found->type() == MYSQL_TYPE_VARCHAR) + else if (found->type() == MYSQL_TYPE_VARCHAR) + recinfo_pos->type= FIELD_VARCHAR; + else if (!(options & HA_OPTION_PACK_RECORD)) recinfo_pos->type= (int) FIELD_NORMAL; else if (found->zero_pack()) recinfo_pos->type= (int) FIELD_SKIP_ZERO; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index b242698d36e..949545bcdb0 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1214,7 +1214,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2) { Field* f = *field; int len = *lengths++; - int res = f->key_cmp(key1, key2); + int res = f->cmp(key1, key2); if (res) return res; key1 += len; @@ -1668,7 +1668,7 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, { int res; uint offset= (uint) (field->ptr -
record); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + if ((res= field->cmp(key1 + offset, key2 + offset))) return res; } } @@ -1702,7 +1702,7 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) { int res; uint offset= (uint) (field->ptr - record); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + if ((res= field->cmp(key1 + offset, key2 + offset))) return (*order_item)->asc ? res : -res; } } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index f9149f10a30..0f54f06a22b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -746,7 +746,7 @@ int QUICK_RANGE_SELECT::init() void QUICK_RANGE_SELECT::range_end() { if (file->inited != handler::NONE) - file->ha_index_end(); + file->ha_index_or_rnd_end(); } @@ -3687,7 +3687,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, } /* Get local copy of key */ copies= 1; - if (field->key_type() == HA_KEYTYPE_VARTEXT) + if (field->key_type() == HA_KEYTYPE_VARTEXT1 || + field->key_type() == HA_KEYTYPE_VARTEXT2) copies= 2; str= str2= (char*) alloc_root(param->mem_root, (key_part->store_length)*copies+1); @@ -5888,7 +5889,7 @@ int QUICK_RANGE_SELECT::get_next() SYNOPSIS QUICK_RANGE_SELECT::get_next_prefix() prefix_length length of cur_prefix - cur_prefix prefix of a key to be searached for + cur_prefix prefix of a key to be searched for DESCRIPTION Each subsequent call to the method retrieves the first record that has a @@ -7402,7 +7403,8 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows, quick->quick_prefix_select= NULL; /* Can't construct a quick select. */ else /* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */ - quick->quick_prefix_select= get_quick_select(param, param_idx, index_tree, + quick->quick_prefix_select= get_quick_select(param, param_idx, + index_tree, &quick->alloc); /* diff --git a/sql/sql_base.cc b/sql/sql_base.cc index b4a2f368bc2..2500769ee30 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2467,6 +2467,8 @@ find_item_in_list(Item *find, List &items, uint *counter, bool found_unaliased_non_uniq= 0; uint unaliased_counter; + LINT_INIT(unaliased_counter); // Dependent on found_unaliased + *unaliased= FALSE; if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 3d4252a2b17..4810756c40d 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4891,11 +4891,9 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, break; case MYSQL_TYPE_VARCHAR: /* - We can't use pack_length as this includes the field length Long VARCHAR's are automaticly converted to blobs in mysql_prepare_table if they don't have a default value */ - new_field->key_length= new_field->length; max_field_charlength= MAX_FIELD_VARCHARLENGTH; break; case MYSQL_TYPE_STRING: @@ -5083,16 +5081,12 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, my_error(ER_WRONG_FIELD_SPEC, MYF(0), field_name); DBUG_RETURN(1); } - if (!new_field->pack_length) - new_field->pack_length= calc_pack_length(new_field->sql_type, - new_field->length); - if (!new_field->key_length) - new_field->key_length= new_field->pack_length; lex->create_list.push_back(new_field); lex->last_field=new_field; DBUG_RETURN(0); } + /* Store position for column in ALTER TABLE .. 
ADD column */ void store_position_for_column(const char *name) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ed3606856a0..5f69074adcc 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -235,9 +235,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result) res|= thd->net.report_error; if (unlikely(res)) { - /* - If we have real error reported erly then this will be ignored - */ + /* If we had another error reported earlier then this will be ignored */ result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR)); result->abort(); } @@ -4873,7 +4871,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, &keyinfo->key_part[i], (char*) key_buff,maybe_null); /* - Remeber if we are going to use REF_OR_NULL + Remember if we are going to use REF_OR_NULL But only if field _really_ can be null i.e. we force JT_REF instead of JT_REF_OR_NULL in case if field can't be null */ @@ -7538,7 +7536,7 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, { Field *new_field; - if (convert_blob_length && org_field->flags & BLOB_FLAG) + if (convert_blob_length && (org_field->flags & BLOB_FLAG)) new_field= new Field_varstring(convert_blob_length, org_field->maybe_null(), org_field->field_name, table, @@ -7805,7 +7803,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, else for (ORDER *tmp=group ; tmp ; tmp=tmp->next) { (*tmp->item)->marker=4; // Store null in key - if ((*tmp->item)->max_length >= MAX_CHAR_WIDTH) + if ((*tmp->item)->max_length >= CONVERT_IF_BIGGER_TO_BLOB) using_unique_constraint=1; } if (param->group_length >= MAX_BLOB_WIDTH) @@ -8147,16 +8145,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, key_part_info->null_bit=0; key_part_info->field= field; key_part_info->offset= field->offset(); - key_part_info->length= (uint16) field->pack_length(); + key_part_info->length= (uint16) field->key_length(); key_part_info->type= (uint8) field->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; if (!using_unique_constraint) { group->buff=(char*) group_buff; - if (!(group->field=field->new_field(thd->mem_root,table))) + if (!(group->field=field->new_key_field(thd->mem_root,table))) goto err; /* purecov: inspected */ if (maybe_null) { @@ -8177,7 +8176,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, group->field->move_field((char*) group_buff); /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */ key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; - group_buff+= key_part_info->length; + group_buff+= group->field->pack_length(); } keyinfo->key_length+= key_part_info->length; } @@ -8241,7 +8240,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, key_part_info->type= (uint8) (*reg_field)->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
0 : FIELDFLAG_BINARY; } } @@ -8291,8 +8291,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, MI_KEYDEF keydef; MI_UNIQUEDEF uniquedef; KEY *keyinfo=param->keyinfo; - DBUG_ENTER("create_myisam_tmp_table"); + if (table->keys) { // Get keys for ni_create bool using_unique_constraint=0; @@ -8340,19 +8340,18 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, { seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? - HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT); - seg->bit_start=seg->length - table->blob_ptr_size; + HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); + seg->bit_start= field->pack_length() - table->blob_ptr_size; seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } else { - seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? - HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT); + seg->type= keyinfo->key_part[i].type; /* Tell handler if it can do suffix space compression */ if (field->real_type() == MYSQL_TYPE_STRING && keyinfo->key_part[i].length > 4) - seg->flag|=HA_SPACE_PACK; + seg->flag|= HA_SPACE_PACK; } if (!(field->flags & NOT_NULL_FLAG)) { @@ -8361,7 +8360,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, /* We are using a GROUP BY on something that contains NULL In this case we have to tell MyISAM that two NULL should - on INSERT be compared as equal + on INSERT be regarded as the same value */ if (!using_unique_constraint) keydef.flag|= HA_NULL_ARE_EQUAL; @@ -8645,21 +8644,19 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) } if (table) { - int tmp; + int tmp, new_errno= 0; if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE))) { DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); - my_errno= tmp; - error= -1; + new_errno= tmp; } if ((tmp=table->file->ha_index_or_rnd_end())) { DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); - my_errno= tmp; - error= -1; + new_errno= tmp; } - if (error == -1) - table->file->print_error(my_errno,MYF(0)); + if (new_errno) + table->file->print_error(new_errno,MYF(0)); } #ifndef DBUG_OFF if (error) @@ -9831,13 +9828,19 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); } - /* The null bits are already set */ + /* + Copy null bits from group key to table + We can't copy all data as the key may have a different format + than the row data (for example with VARCHAR keys) + */ KEY_PART_INFO *key_part; for (group=table->group,key_part=table->key_info[0].key_part; group ; group=group->next,key_part++) - memcpy(table->record[0]+key_part->offset, group->buff, key_part->length); - + { + if (key_part->null_bit) + memcpy(table->record[0]+key_part->offset, group->buff, 1); + } init_tmptable_sum_functions(join->sum_funcs); copy_funcs(join->tmp_table_param.items_to_copy); if ((error=table->file->write_row(table->record[0]))) @@ -11647,8 +11650,10 @@ calc_group_buffer(JOIN *join,ORDER *group) { if (field->type() == FIELD_TYPE_BLOB) key_length+=MAX_BLOB_WIDTH; // Can't be used as a key + else if (field->type() == MYSQL_TYPE_VARCHAR) + key_length+= field->field_length + HA_KEY_BLOB_LENGTH; else - key_length+=field->pack_length(); + key_length+= field->pack_length(); } else if ((*group->item)->result_type() == REAL_RESULT) key_length+=sizeof(double); diff --git a/sql/sql_select.h b/sql/sql_select.h index 5e42fc0ee30..be3a72836b4 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -420,12 +420,15 @@ class store_key :public Sql_alloc :null_ptr(null),err(0) { if (field_arg->type() ==
FIELD_TYPE_BLOB) - to_field=new Field_varstring(ptr, length, (uchar*) null, 1, + { + /* Key segments are always packed with a 2 byte length prefix */ + to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1, Field::NONE, field_arg->field_name, field_arg->table, field_arg->charset()); + } else { - to_field=field_arg->new_field(thd->mem_root,field_arg->table); + to_field=field_arg->new_key_field(thd->mem_root, field_arg->table); if (to_field) to_field->move_field(ptr, (uchar*) null, 1); } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 71467664085..0a69ddcb40b 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3319,7 +3319,7 @@ int make_schema_select(THD *thd, SELECT_LEX *sel, /* - Fill temporaty schema tables before SELECT + Fill temporary schema tables before SELECT SYNOPSIS get_schema_tables_result() diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 56605d1c6e0..ddd713d47cd 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -508,7 +508,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, String conv, *tmp; for (uint i= 0; (tmp= it++); i++) { - if (String::needs_conversion(tmp->length(), tmp->charset(), cs, &dummy)) + if (String::needs_conversion(tmp->length(), tmp->charset(), cs, + &dummy)) { uint cnv_errs; conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs); @@ -3686,7 +3687,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, free_io_cache(from); delete [] copy; // This is never 0 - if (to->file->end_bulk_insert() && !error) + if (to->file->end_bulk_insert() && error <= 0) { to->file->print_error(my_errno,MYF(0)); error=1; diff --git a/vio/vio.c b/vio/vio.c index a356d8edeff..39b5f843e5e 100644 --- a/vio/vio.c +++ b/vio/vio.c @@ -32,7 +32,8 @@ void vio_reset(Vio* vio, enum enum_vio_type type, my_bool localhost) { DBUG_ENTER("vio_reset"); - DBUG_PRINT("enter", ("type=%d sd=%d localhost=%d", type, sd, localhost)); + DBUG_PRINT("enter", ("type: %d sd: %d localhost: %d", type, sd, + localhost)); bzero((char*) vio, sizeof(*vio)); vio->type = type; @@ -123,7 +124,7 @@ Vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost) { Vio *vio; DBUG_ENTER("vio_new"); - DBUG_PRINT("enter", ("sd=%d", sd)); + DBUG_PRINT("enter", ("sd: %d", sd)); if ((vio = (Vio*) my_malloc(sizeof(*vio),MYF(MY_WME)))) { vio_reset(vio, type, sd, 0, localhost); diff --git a/vio/viosocket.c b/vio/viosocket.c index 48a9058480a..2921eb7495e 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -33,7 +33,7 @@ int vio_read(Vio * vio, gptr buf, int size) { int r; DBUG_ENTER("vio_read"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); #ifdef __WIN__ r = recv(vio->sd, buf, size,0); @@ -56,7 +56,7 @@ int vio_write(Vio * vio, const gptr buf, int size) { int r; DBUG_ENTER("vio_write"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); #ifdef __WIN__ r = send(vio->sd, buf, size,0); #else @@ -168,7 +168,7 @@ int vio_keepalive(Vio* vio, my_bool set_keep_alive) int r=0; uint opt = 0; DBUG_ENTER("vio_keepalive"); - DBUG_PRINT("enter", ("sd=%d, set_keep_alive=%d", vio->sd, (int) + DBUG_PRINT("enter", ("sd: %d, set_keep_alive: %d", vio->sd, (int) set_keep_alive)); if (vio->type != VIO_TYPE_NAMEDPIPE) { @@ -315,7 +315,7 @@ int vio_read_pipe(Vio * vio, gptr buf, int size) { DWORD length; DBUG_ENTER("vio_read_pipe"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, 
size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); if (!ReadFile(vio->hPipe, buf, size, &length, NULL)) DBUG_RETURN(-1); @@ -329,7 +329,7 @@ int vio_write_pipe(Vio * vio, const gptr buf, int size) { DWORD length; DBUG_ENTER("vio_write_pipe"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); if (!WriteFile(vio->hPipe, (char*) buf, size, &length, NULL)) DBUG_RETURN(-1); @@ -373,7 +373,7 @@ int vio_read_shared_memory(Vio * vio, gptr buf, int size) char *current_postion; DBUG_ENTER("vio_read_shared_memory"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); remain_local = size; current_postion=buf; @@ -423,7 +423,7 @@ int vio_write_shared_memory(Vio * vio, const gptr buf, int size) char *current_postion; DBUG_ENTER("vio_write_shared_memory"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); remain = size; current_postion = buf; diff --git a/vio/viossl.c b/vio/viossl.c index a489cb98f98..912365adca0 100644 --- a/vio/viossl.c +++ b/vio/viossl.c @@ -99,7 +99,7 @@ int vio_ssl_read(Vio * vio, gptr buf, int size) { int r; DBUG_ENTER("vio_ssl_read"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d, ssl_=%p", + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d, ssl_: 0x%p", vio->sd, buf, size, vio->ssl_arg)); if ((r= SSL_read((SSL*) vio->ssl_arg, buf, size)) < 0) @@ -117,7 +117,7 @@ int vio_ssl_write(Vio * vio, const gptr buf, int size) { int r; DBUG_ENTER("vio_ssl_write"); - DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size)); + DBUG_PRINT("enter", ("sd: %d, buf: 0x%p, size: %d", vio->sd, buf, size)); if ((r= SSL_write((SSL*) vio->ssl_arg, buf, size)) < 0) report_errors(); @@ -157,7 +157,7 @@ int vio_ssl_keepalive(Vio* vio, my_bool set_keep_alive) { int r=0; DBUG_ENTER("vio_ssl_keepalive"); - DBUG_PRINT("enter", ("sd=%d, set_keep_alive=%d", vio->sd, (int) + DBUG_PRINT("enter", ("sd: %d, set_keep_alive: %d", vio->sd, (int) set_keep_alive)); if (vio->type != VIO_TYPE_NAMEDPIPE) { @@ -227,7 +227,7 @@ my_socket vio_ssl_fd(Vio* vio) my_bool vio_ssl_peer_addr(Vio * vio, char *buf, uint16 *port) { DBUG_ENTER("vio_ssl_peer_addr"); - DBUG_PRINT("enter", ("sd=%d", vio->sd)); + DBUG_PRINT("enter", ("sd: %d", vio->sd)); if (vio->localhost) { strmov(buf,"127.0.0.1"); @@ -250,7 +250,7 @@ my_bool vio_ssl_peer_addr(Vio * vio, char *buf, uint16 *port) *port= 0; #endif } - DBUG_PRINT("exit", ("addr=%s", buf)); + DBUG_PRINT("exit", ("addr: %s", buf)); DBUG_RETURN(0); } @@ -279,7 +279,7 @@ int sslaccept(struct st_VioSSLAcceptorFd* ptr, Vio* vio, long timeout) my_bool net_blocking; enum enum_vio_type old_type; DBUG_ENTER("sslaccept"); - DBUG_PRINT("enter", ("sd=%d ptr=%p", vio->sd,ptr)); + DBUG_PRINT("enter", ("sd: %d ptr: 0x%p", vio->sd,ptr)); old_type= vio->type; net_blocking = vio_is_blocking(vio); @@ -294,7 +294,8 @@ int sslaccept(struct st_VioSSLAcceptorFd* ptr, Vio* vio, long timeout) vio_blocking(vio, net_blocking, &unused); DBUG_RETURN(1); } - DBUG_PRINT("info", ("ssl_=%p timeout=%ld",(SSL*) vio->ssl_arg, timeout)); + DBUG_PRINT("info", ("ssl_: 0x%p timeout: %ld", + (SSL*) vio->ssl_arg, timeout)); SSL_clear((SSL*) vio->ssl_arg); SSL_SESSION_set_timeout(SSL_get_session((SSL*) vio->ssl_arg), timeout); SSL_set_fd((SSL*) vio->ssl_arg,vio->sd); @@ -352,7 +353,8 @@ int
sslconnect(struct st_VioSSLConnectorFd* ptr, Vio* vio, long timeout) my_bool net_blocking; enum enum_vio_type old_type; DBUG_ENTER("sslconnect"); - DBUG_PRINT("enter", ("sd=%d ptr=%p ctx: %p", vio->sd,ptr,ptr->ssl_context)); + DBUG_PRINT("enter", ("sd: %d ptr: 0x%p ctx: 0x%p", + vio->sd,ptr,ptr->ssl_context)); old_type= vio->type; net_blocking = vio_is_blocking(vio); @@ -367,7 +369,8 @@ int sslconnect(struct st_VioSSLConnectorFd* ptr, Vio* vio, long timeout) vio_blocking(vio, net_blocking, &unused); DBUG_RETURN(1); } - DBUG_PRINT("info", ("ssl_=%p timeout=%ld",(SSL*) vio->ssl_arg, timeout)); + DBUG_PRINT("info", ("ssl_: 0x%p timeout: %ld", + (SSL*) vio->ssl_arg, timeout)); SSL_clear((SSL*) vio->ssl_arg); SSL_SESSION_set_timeout(SSL_get_session((SSL*) vio->ssl_arg), timeout); SSL_set_fd ((SSL*) vio->ssl_arg, vio->sd); diff --git a/vio/viosslfactories.c b/vio/viosslfactories.c index 498d10da0ee..44a077c33fc 100644 --- a/vio/viosslfactories.c +++ b/vio/viosslfactories.c @@ -80,7 +80,7 @@ static int vio_set_cert_stuff(SSL_CTX *ctx, const char *cert_file, const char *key_file) { DBUG_ENTER("vio_set_cert_stuff"); - DBUG_PRINT("enter", ("ctx=%p, cert_file=%s, key_file=%s", + DBUG_PRINT("enter", ("ctx: 0x%p, cert_file: %s, key_file: %s", ctx, cert_file, key_file)); if (cert_file != NULL) { @@ -131,7 +131,7 @@ vio_verify_callback(int ok, X509_STORE_CTX *ctx) int err,depth; DBUG_ENTER("vio_verify_callback"); - DBUG_PRINT("enter", ("ok=%d, ctx=%p", ok, ctx)); + DBUG_PRINT("enter", ("ok: %d, ctx: 0x%p", ok, ctx)); err_cert=X509_STORE_CTX_get_current_cert(ctx); err= X509_STORE_CTX_get_error(ctx); depth= X509_STORE_CTX_get_error_depth(ctx); @@ -220,7 +220,7 @@ new_VioSSLConnectorFd(const char* key_file, DH *dh; DBUG_ENTER("new_VioSSLConnectorFd"); DBUG_PRINT("enter", - ("key_file=%s, cert_file=%s, ca_path=%s, ca_file=%s, cipher=%s", + ("key_file: %s, cert_file: %s, ca_path: %s, ca_file: %s, cipher: %s", key_file, cert_file, ca_path, ca_file, cipher)); if (!(ptr=((struct st_VioSSLConnectorFd*) @@ -315,7 +315,7 @@ new_VioSSLAcceptorFd(const char *key_file, DH *dh; DBUG_ENTER("new_VioSSLAcceptorFd"); DBUG_PRINT("enter", - ("key_file=%s, cert_file=%s, ca_path=%s, ca_file=%s, cipher=%s", + ("key_file: %s, cert_file: %s, ca_path: %s, ca_file: %s, cipher: %s", key_file, cert_file, ca_path, ca_file, cipher)); ptr= ((struct st_VioSSLAcceptorFd*) From b2474e7ac1e94b213d2bfa2a699afbfe5c457126 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 18 Dec 2004 13:04:34 +0200 Subject: [PATCH 02/21] rem0rec.ic: Do not use short int in rem0rec.ic, since its size is not fixed in ANSI C; improve comments of the relative offset field in a record; use mach_read_from_2() to read the relative offset field to save CPU time, if the compiler does not optimize a more complex access function innobase/include/rem0rec.ic: Do not use short int in rem0rec.ic, since its size is not fixed in ANSI C; improve comments of the relative offset field in a record; use mach_read_from_2() to read the relative offset field to save CPU time, if the compiler does not optimize a more complex access function --- innobase/include/rem0rec.ic | 81 +++++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 25 deletions(-) diff --git a/innobase/include/rem0rec.ic b/innobase/include/rem0rec.ic index 20a97f7e750..6c3dabf04a2 100644 --- a/innobase/include/rem0rec.ic +++ b/innobase/include/rem0rec.ic @@ -40,8 +40,18 @@ most significant bytes and bits are written below less significant.
(1) byte offset (2) bit usage within byte downward from - origin -> 1 8 bits pointer to next record (relative) - 2 8 bits pointer to next record (relative) + origin -> 1 8 bits relative offset of next record + 2 8 bits relative offset of next record + the relative offset is an unsigned 16-bit + integer: + (offset_of_next_record + - offset_of_this_record) mod 64Ki, + where mod is the modulo as a non-negative + number; + we can calculate the offset of the next + record with the formula: + relative_offset + offset_of_this_record + mod UNIV_PAGE_SIZE 3 3 bits status: 000=conventional record 001=node pointer record (inside B-tree) @@ -252,26 +262,37 @@ UNIV_INLINE ulint rec_get_next_offs( /*==============*/ - /* out: the page offset of the next chained record */ + /* out: the page offset of the next chained record, or + 0 if none */ rec_t* rec, /* in: physical record */ ibool comp) /* in: TRUE=compact page format */ { + ulint field_value; + + ut_ad(REC_NEXT_MASK == 0xFFFFUL); + ut_ad(REC_NEXT_SHIFT == 0); + + field_value = mach_read_from_2(rec - REC_NEXT); + if (comp) { - lint ret = (signed short) rec_get_bit_field_2(rec, REC_NEXT, - REC_NEXT_MASK, REC_NEXT_SHIFT); #if UNIV_PAGE_SIZE <= 32768 - /* with 64 KiB page size, the pointer will "wrap around", - and the following assertions are invalid */ - ut_ad(ret + ut_align_offset(rec, UNIV_PAGE_SIZE) < - UNIV_PAGE_SIZE); + /* Note that for 64 KiB pages, field_value can 'wrap around' + and the debug assertion is not valid */ + + ut_ad((int16_t)field_value + + ut_align_offset(rec, UNIV_PAGE_SIZE) + < UNIV_PAGE_SIZE); #endif - return(ret ? ut_align_offset(rec + ret, UNIV_PAGE_SIZE) : 0); - } - else { - ulint ret = rec_get_bit_field_2(rec, REC_NEXT, - REC_NEXT_MASK, REC_NEXT_SHIFT); - ut_ad(ret < UNIV_PAGE_SIZE); - return(ret); + if (field_value == 0) { + + return(0); + } + + return(ut_align_offset(rec + field_value, UNIV_PAGE_SIZE)); + } else { + ut_ad(field_value < UNIV_PAGE_SIZE); + + return(field_value); } } @@ -284,21 +305,31 @@ rec_set_next_offs( /*==============*/ rec_t* rec, /* in: physical record */ ibool comp, /* in: TRUE=compact page format */ - ulint next) /* in: offset of the next record */ + ulint next) /* in: offset of the next record, or 0 if none */ { ut_ad(rec); ut_ad(UNIV_PAGE_SIZE > next); + ut_ad(REC_NEXT_MASK == 0xFFFFUL); + ut_ad(REC_NEXT_SHIFT == 0); if (comp) { - rec_set_bit_field_2(rec, next - ?
(next - ut_align_offset(rec, UNIV_PAGE_SIZE)) -#ifdef UNIV_DEBUG /* avoid an assertion failure */ - & (REC_NEXT_MASK >> REC_NEXT_SHIFT) -#endif - : 0, REC_NEXT, REC_NEXT_MASK, REC_NEXT_SHIFT); + ulint field_value; + + if (next) { + /* The following two statements calculate + next - offset_of_rec mod 64Ki, where mod is the modulo + as a non-negative number */ + + field_value = (ulint)((lint)next + - (lint)ut_align_offset(rec, UNIV_PAGE_SIZE)); + field_value &= REC_NEXT_MASK; + } else { + field_value = 0; + } + + mach_write_to_2(rec - REC_NEXT, field_value); } else { - rec_set_bit_field_2(rec, next, - REC_NEXT, REC_NEXT_MASK, REC_NEXT_SHIFT); + mach_write_to_2(rec - REC_NEXT, next); } } From 8da9faaafc00847936e023c9ddd5d63f879972cc Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 18 Dec 2004 18:20:23 +0200 Subject: [PATCH 03/21] trx0roll.c: Print progress of background rollback of transactions with more than 1000 undo log entries srv0start.c, trx0roll.c, log0recv.c, trx0roll.h: Cleanup background rollback code in crash recovery; do not flush all modified pages from the buffer pool after a crash recovery: this makes mysqld accessible for users more quickly innobase/include/trx0roll.h: Cleanup background rollback code in crash recovery; do not flush all modified pages from the buffer pool after a crash recovery: this makes mysqld accessible for users more quickly innobase/log/log0recv.c: Cleanup background rollback code in crash recovery; do not flush all modified pages from the buffer pool after a crash recovery: this makes mysqld accessible for users more quickly innobase/srv/srv0start.c: Cleanup background rollback code in crash recovery; do not flush all modified pages from the buffer pool after a crash recovery: this makes mysqld accessible for users more quickly innobase/trx/trx0roll.c: Print progress of background rollback of transactions with more than 1000 undo log entries --- innobase/include/trx0roll.h | 16 +++++++--- innobase/log/log0recv.c | 1 - innobase/srv/srv0start.c | 12 ++++---- innobase/trx/trx0roll.c | 58 +++++++++++++++++++++++++------------ 4 files changed, 57 insertions(+), 30 deletions(-) diff --git a/innobase/include/trx0roll.h b/innobase/include/trx0roll.h index 893e5af6c01..9d025da4a5f 100644 --- a/innobase/include/trx0roll.h +++ b/innobase/include/trx0roll.h @@ -105,11 +105,19 @@ trx_rollback( Rollback or clean up transactions which have no user session. If the transaction already was committed, then we clean up a possible insert undo log. If the transaction was not yet committed, then we roll it back. -Note: this is done in a background thread */ +Note: this is done in a background thread. */ -void * -trx_rollback_or_clean_all_without_sess(void *); -/*============================================*/ +#ifndef __WIN__ +void* +#else +ulint +#endif +trx_rollback_or_clean_all_without_sess( +/*===================================*/ + /* out: a dummy parameter */ + void* arg __attribute__((unused))); + /* in: a dummy parameter required by + os_thread_create */ /******************************************************************** Finishes a transaction rollback.
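To make the rem0rec.ic change above concrete: the next-record link is now a plain unsigned 16-bit field holding (offset_of_next - offset_of_this) mod 64Ki, and the reader recovers the real page offset by adding the field to the record's own offset modulo the page size. The following minimal C sketch mirrors that arithmetic; encode_next(), decode_next() and the 16 KiB PAGE_SIZE are illustrative stand-ins invented here, not the real mach_write_to_2()/mach_read_from_2()/ut_align_offset() helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 16384u                /* stand-in for UNIV_PAGE_SIZE */

/* Store the link as (next - this) mod 64Ki, as rec_set_next_offs() does. */
static uint16_t encode_next(unsigned this_off, unsigned next_off)
{
    return (uint16_t) (next_off - this_off);  /* wraps modulo 65536 */
}

/* Recover the next record's page offset, as rec_get_next_offs() does.
   Correct because 64Ki is a multiple of every power-of-two page size. */
static unsigned decode_next(unsigned this_off, uint16_t field_value)
{
    return (this_off + field_value) % PAGE_SIZE;
}

int main(void)
{
    /* A record at offset 120 linking back to offset 99: the difference
       is negative, so the stored 16-bit field wraps around 64Ki. */
    uint16_t field_value = encode_next(120, 99);
    assert(decode_next(120, field_value) == 99);
    printf("stored: %u decoded: %u\n",
           (unsigned) field_value, decode_next(120, field_value));
    return 0;
}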
*/ diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c index f42f0eb8c72..5eefd32c8a6 100644 --- a/innobase/log/log0recv.c +++ b/innobase/log/log0recv.c @@ -2937,7 +2937,6 @@ recv_recovery_from_checkpoint_finish(void) #ifndef UNIV_LOG_DEBUG recv_sys_free(); #endif - if (srv_force_recovery < SRV_FORCE_NO_TRX_UNDO) { os_thread_create(trx_rollback_or_clean_all_without_sess, (void *)&i, &recovery_thread_id); diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c index 69341a1d7d1..a0e763d7a44 100644 --- a/innobase/srv/srv0start.c +++ b/innobase/srv/srv0start.c @@ -1403,15 +1403,13 @@ NetWare. */ fsp_header_inc_size(0, sum_of_new_sizes, &mtr); mtr_commit(&mtr); - } - if (recv_needed_recovery) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Flushing modified pages from the buffer pool...\n"); - } + /* Immediately write the log record about increased tablespace + size to disk, so that it is durable even if mysqld would crash + quickly */ - log_make_checkpoint_at(ut_dulint_max, TRUE); + log_buffer_flush_to_disk(); + } #ifdef UNIV_LOG_ARCHIVE /* Archiving is always off under MySQL */ diff --git a/innobase/trx/trx0roll.c b/innobase/trx/trx0roll.c index db5e16c7778..ca286997598 100644 --- a/innobase/trx/trx0roll.c +++ b/innobase/trx/trx0roll.c @@ -332,11 +332,19 @@ trx_savept_take( Rollback or clean up transactions which have no user session. If the transaction already was committed, then we clean up a possible insert undo log. If the transaction was not yet committed, then we roll it back. -Note: this is done in a background thread */ +Note: this is done in a background thread. */ -void * -trx_rollback_or_clean_all_without_sess(void *i) -/*========================================*/ +#ifndef __WIN__ +void* +#else +ulint +#endif +trx_rollback_or_clean_all_without_sess( +/*===================================*/ + /* out: a dummy parameter */ + void* arg __attribute__((unused))) + /* in: a dummy parameter required by + os_thread_create */ { mem_heap_t* heap; que_fork_t* fork; @@ -361,9 +369,9 @@ trx_rollback_or_clean_all_without_sess(void *i) if (UT_LIST_GET_FIRST(trx_sys->trx_list)) { fprintf(stderr, - "InnoDB: Starting rollback of uncommitted transactions\n"); +"InnoDB: Starting in background the rollback of uncommitted transactions\n"); } else { - os_thread_exit(i); + goto leave_function; } loop: heap = mem_heap_create(512); @@ -373,7 +381,6 @@ loop: trx = UT_LIST_GET_FIRST(trx_sys->trx_list); while (trx) { - if ((trx->sess || (trx->conc_state == TRX_NOT_STARTED))) { trx = UT_LIST_GET_NEXT(trx_list, trx); } else if (trx->conc_state == TRX_PREPARED) { @@ -386,17 +393,17 @@ loop: mutex_exit(&kernel_mutex); if (trx == NULL) { + ut_print_timestamp(stderr); fprintf(stderr, - "InnoDB: Rollback of uncommitted transactions completed\n"); + " InnoDB: Rollback of uncommitted transactions completed\n"); mem_heap_free(heap); - - os_thread_exit(i); + + goto leave_function; } trx->sess = trx_dummy_sess; - if (trx->conc_state == TRX_COMMITTED_IN_MEMORY) { fprintf(stderr, "InnoDB: Cleaning up trx with id %lu %lu\n", (ulong) ut_dulint_get_high(trx->id), @@ -428,13 +435,15 @@ loop: trx_roll_max_undo_no = ut_conv_dulint_to_longlong(trx->undo_no); trx_roll_progress_printed_pct = 0; rows_to_undo = trx_roll_max_undo_no; + if (rows_to_undo > 1000000000) { rows_to_undo = rows_to_undo / 1000000; unit = "M"; } + ut_print_timestamp(stderr); fprintf(stderr, -"InnoDB: Rolling back trx with id %lu %lu, %lu%s rows to undo", +" InnoDB: Rolling back trx with id %lu %lu, %lu%s rows to undo", (ulong) 
ut_dulint_get_high(trx->id), (ulong) ut_dulint_get_low(trx->id), (ulong) rows_to_undo, unit); @@ -454,7 +463,7 @@ loop: fprintf(stderr, "InnoDB: Waiting for rollback of trx id %lu to end\n", - (ulong) ut_dulint_get_low(trx->id)); + (ulong) ut_dulint_get_low(trx->id)); os_thread_sleep(100000); mutex_enter(&kernel_mutex); @@ -495,7 +504,19 @@ loop: goto loop; - os_thread_exit(i); /* not reached */ +leave_function: + /* We count the number of threads in os_thread_exit(). A created + thread should always use that to exit and not use return() to exit. */ + + os_thread_exit(NULL); + + /* The following is dummy code to keep the compiler happy: */ + +#ifndef __WIN__ + return(NULL); +#else + return(0); +#endif } /*********************************************************************** @@ -856,16 +877,17 @@ try_again: ut_ad(ut_dulint_cmp(ut_dulint_add(undo_no, 1), trx->undo_no) == 0); /* We print rollback progress info if we are in a crash recovery - and the transaction has at least 1000 row operations to undo */ + and the transaction has at least 1000 row operations to undo. + Transactions in crash recovery have sess == NULL. */ - if (srv_is_being_started && trx_roll_max_undo_no > 1000) { - progress_pct = 100 - (ulint) + if (trx->sess == NULL && trx_roll_max_undo_no > 1000) { + progress_pct = 100 - (ulint) ((ut_conv_dulint_to_longlong(undo_no) * 100) / trx_roll_max_undo_no); if (progress_pct != trx_roll_progress_printed_pct) { if (trx_roll_progress_printed_pct == 0) { fprintf(stderr, - "\nInnoDB: Progress in percents: %lu", (ulong) progress_pct); +"\nInnoDB: Progress in percents: %lu", (ulong) progress_pct); } else { fprintf(stderr, " %lu", (ulong) progress_pct); From 8b936635b066c7358d3d4a39b7014d7407c6c185 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 18 Dec 2004 18:44:03 +0200 Subject: [PATCH 04/21] trx0roll.c: Fix the rollback progress printout in crash recovery innobase/trx/trx0roll.c: Fix the rollback progress printout in crash recovery --- innobase/trx/trx0roll.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/innobase/trx/trx0roll.c b/innobase/trx/trx0roll.c index ca286997598..ddb1240339d 100644 --- a/innobase/trx/trx0roll.c +++ b/innobase/trx/trx0roll.c @@ -30,9 +30,13 @@ Created 3/26/1996 Heikki Tuuri /* This many pages must be undone before a truncate is tried within rollback */ #define TRX_ROLL_TRUNC_THRESHOLD 1 +/* In crash recovery, the current trx to be rolled back */ +trx_t* trx_roll_crash_recv_trx = NULL; + /* In crash recovery we set this to the undo n:o of the current trx to be rolled back. Then we can print how many % the rollback has progressed. 
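Concretely, the percentage printed during recovery is just the undone fraction of trx_roll_max_undo_no. A minimal sketch of that calculation, with plain 64-bit integers standing in for ib_longlong (the dulint conversion helpers are assumed away):

#include <stdio.h>

/* Percentage already rolled back: undo_no counts down towards zero,
   max_undo_no is the transaction's undo count when the rollback began. */
static unsigned long progress_pct(long long undo_no, long long max_undo_no)
{
    return 100 - (unsigned long) ((undo_no * 100) / max_undo_no);
}

int main(void)
{
    /* 250,000 of 1,000,000 undo log records still to undo -> 75 printed. */
    printf("InnoDB: Progress in percents: %lu\n",
           progress_pct(250000, 1000000));
    return 0;
}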
*/ ib_longlong trx_roll_max_undo_no; + /* Auxiliary variable which tells the previous progress % we printed */ ulint trx_roll_progress_printed_pct; @@ -432,6 +436,7 @@ loop: ut_a(thr == que_fork_start_command(fork)); + trx_roll_crash_recv_trx = trx; trx_roll_max_undo_no = ut_conv_dulint_to_longlong(trx->undo_no); trx_roll_progress_printed_pct = 0; rows_to_undo = trx_roll_max_undo_no; @@ -443,7 +448,7 @@ loop: ut_print_timestamp(stderr); fprintf(stderr, -" InnoDB: Rolling back trx with id %lu %lu, %lu%s rows to undo", +" InnoDB: Rolling back trx with id %lu %lu, %lu%s rows to undo\n", (ulong) ut_dulint_get_high(trx->id), (ulong) ut_dulint_get_low(trx->id), (ulong) rows_to_undo, unit); @@ -502,6 +507,8 @@ loop: (ulong) ut_dulint_get_low(trx->id)); mem_heap_free(heap); + trx_roll_crash_recv_trx = NULL; + goto loop; leave_function: @@ -877,17 +884,17 @@ try_again: ut_ad(ut_dulint_cmp(ut_dulint_add(undo_no, 1), trx->undo_no) == 0); /* We print rollback progress info if we are in a crash recovery - and the transaction has at least 1000 row operations to undo. - Transactions in crash recovery have sess == NULL. */ + and the transaction has at least 1000 row operations to undo. */ + + if (trx == trx_roll_crash_recv_trx && trx_roll_max_undo_no > 1000) { - if (trx->sess == NULL && trx_roll_max_undo_no > 1000) { progress_pct = 100 - (ulint) ((ut_conv_dulint_to_longlong(undo_no) * 100) / trx_roll_max_undo_no); if (progress_pct != trx_roll_progress_printed_pct) { if (trx_roll_progress_printed_pct == 0) { fprintf(stderr, -"\nInnoDB: Progress in percents: %lu", (ulong) progress_pct); +"\nInnoDB: Progress in percents: %lu\n", (ulong) progress_pct); } else { fprintf(stderr, " %lu", (ulong) progress_pct); From 67f5704d992b6c89cdaa6e61c8ca59e6a2879b02 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 18 Dec 2004 19:52:43 +0200 Subject: [PATCH 05/21] trx0roll.c: Put a sensible value of thread id and process no to a background rollback transaction innobase/trx/trx0roll.c: Put a sensible value of thread id and process no to a background rollback transaction --- innobase/trx/trx0roll.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/innobase/trx/trx0roll.c b/innobase/trx/trx0roll.c index ddb1240339d..e5cffd2a4f3 100644 --- a/innobase/trx/trx0roll.c +++ b/innobase/trx/trx0roll.c @@ -454,6 +454,10 @@ loop: (ulong) rows_to_undo, unit); mutex_exit(&kernel_mutex); + trx->mysql_thread_id = os_thread_get_curr_id(); + + trx->mysql_process_no = os_proc_get_number(); + if (trx->dict_operation) { row_mysql_lock_data_dictionary(trx); } From 2aa5d925373576715d6cd13d61dd0162ea59a937 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Dec 2004 03:07:05 +0300 Subject: [PATCH 06/21] fix for Bug#7386 - IM fails to compile on alpha with Compaq C++ compiler server-tools/instance-manager/mysql_connection.cc: use enum value instead of extern const in array definition server-tools/instance-manager/priv.cc: added a comment to remind of enum value in mysql_connection.cc --- server-tools/instance-manager/mysql_connection.cc | 4 +++- server-tools/instance-manager/priv.cc | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/server-tools/instance-manager/mysql_connection.cc b/server-tools/instance-manager/mysql_connection.cc index 9365edc7b91..83b046c1e5b 100644 --- a/server-tools/instance-manager/mysql_connection.cc +++ b/server-tools/instance-manager/mysql_connection.cc @@ -191,9 +191,11 @@ void Mysql_connection_thread::run() int Mysql_connection_thread::check_connection() { ulong pkt_len=0; // to hold client reply 
length + /* maximum size of the version string */ + enum { MAX_VERSION_LENGTH= 80 }; /* buffer for the first packet */ /* packet contains: */ - char buff[mysqlmanager_version_length + 1 + // server version, 0-ended + char buff[MAX_VERSION_LENGTH + 1 + // server version, 0-ended 4 + // connection id SCRAMBLE_LENGTH + 2 + // scramble (in 2 pieces) 18]; // server variables: flags, diff --git a/server-tools/instance-manager/priv.cc b/server-tools/instance-manager/priv.cc index 8112ebd41d8..4b47fe5b593 100644 --- a/server-tools/instance-manager/priv.cc +++ b/server-tools/instance-manager/priv.cc @@ -16,6 +16,10 @@ #include "priv.h" +/* + The following string must be less than 80 characters, as + mysql_connection.cc relies on it +*/ const char mysqlmanager_version[] = "0.2-alpha"; const int mysqlmanager_version_length= sizeof(mysqlmanager_version) - 1; From 5bc79c0c837ab9621824122e235b335fcd625c8b Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Dec 2004 12:51:40 +0300 Subject: [PATCH 07/21] Small cleanup of derived tables handling. We should not call free_tmp_table() for derived table in mysql_derived_filling(), since by this moment this table is already registered in THD::derived_tables list and thus free_tmp_table() will be called for it automatically in close_thread_tables(). sql/sql_derived.cc: We should not call free_tmp_table() for derived table in mysql_derived_filling(), since by this moment this table is already registered in THD::derived_tables list and thus free_tmp_table() will be called for it automatically in close_thread_tables(). --- sql/sql_derived.cc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 7cea1c6fcee..1d4b911bb65 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -268,13 +268,8 @@ int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) unit->cleanup(); } else - { - free_tmp_table(thd, table); unit->cleanup(); - } lex->current_select= save_current_select; - if (res) - free_tmp_table(thd, table); } return res; } From 2929563fe2f96bae14790beb36e9784da7950893 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Dec 2004 12:57:34 +0300 Subject: [PATCH 08/21] Fixed cut&paste bug that broke compilation with compile-pentium-valgrind-max --- innobase/os/os0proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/innobase/os/os0proc.c b/innobase/os/os0proc.c index 98254ae1055..dd2037695b7 100644 --- a/innobase/os/os0proc.c +++ b/innobase/os/os0proc.c @@ -565,7 +565,7 @@ os_mem_alloc_large( if (ptr) { if (set_to_zero) { #ifdef UNIV_SET_MEM_TO_ZERO - memset(ret, '\0', size); + memset(ptr, '\0', size); #endif } From 069041031074d606e3cb084898ea2504e716e03a Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Dec 2004 20:25:19 +0200 Subject: [PATCH 09/21] Merge with global tree Fixed some bugs found in BIT fields Added more test cases for BIT fields and varchar include/my_base.h: Merge Added HA_BIT_PART for easier test in key.cc innobase/os/os0proc.c: Fixed typo myisam/mi_dbug.c: Add printing of BIT types when debugging mysql-test/r/show_check.result: Set 'Avg_row_length' to # as this value is different between 32 and 64 bit machines mysql-test/r/type_bit.result: More tests mysql-test/r/type_varchar.result: More tests mysql-test/t/show_check.test: Set 'Avg_row_length' to # as this value is different between 32 and 64 bit machines mysql-test/t/type_bit.test: More tests mysql-test/t/type_varchar.test: More tests sql/field.cc: Added Field::val_int_as_str() to get better logging of bit
fields Merged new_key_field with move_field() to fix some problems with bit fields Fixed some bugs with bit fields sql/field.h: Added Field::val_int_as_str() to get better logging of bit fields Merged new_key_field with move_field() to fix some problems with bit fields Fixed some bugs with bit fields sql/ha_myisam.cc: Fixed problem with uninitialized memory (not critical) sql/key.cc: Fix so that 'key_cmp_if_same' works with bit fields. sql/opt_range.cc: Move declarations first in function Nice printing of bit fields in debug log sql/sql_prepare.cc: Fixed old merge error (not critical) sql/sql_select.cc: Use new interface to new_key_field sql/sql_select.h: Use new interface to new_key_field() This fixes a bug with BIT fields where the upper bit of the data was not stored in the key buffer sql/structs.h: Extend key_part_flag to be able to add HA_BIT_PART to it sql/table.cc: Mark BIT key parts with HA_BIT_PART to make test in key.cc simpler --- include/my_base.h | 3 +- innobase/os/os0proc.c | 2 +- myisam/mi_dbug.c | 10 + mysql-test/r/show_check.result | 42 ++--- mysql-test/r/type_bit.result | 6 + mysql-test/r/type_varchar.result | 308 +++++++++++++++++++++++++++++++ mysql-test/t/show_check.test | 14 +- mysql-test/t/type_bit.test | 4 +- mysql-test/t/type_varchar.test | 65 +++++++ sql/field.cc | 97 +++++++++- sql/field.h | 29 +-- sql/ha_myisam.cc | 3 +- sql/key.cc | 54 +++--- sql/opt_range.cc | 18 +- sql/sql_prepare.cc | 3 - sql/sql_select.cc | 18 +- sql/sql_select.h | 7 +- sql/structs.h | 2 +- sql/table.cc | 3 + 19 files changed, 588 insertions(+), 100 deletions(-) diff --git a/include/my_base.h b/include/my_base.h index 9c860c7eebd..4d043cf6b5b 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -187,7 +187,7 @@ enum ha_base_keytype { /* Varchar (0-65535 bytes) with length packed with 2 bytes */ HA_KEYTYPE_VARTEXT2=17, /* Key is sorted as letters */ HA_KEYTYPE_VARBINARY2=18, /* Key is sorted as unsigned chars */ - HA_KEYTYPE_BIT=18 + HA_KEYTYPE_BIT=19 }; #define HA_MAX_KEYTYPE 31 /* Must be log2-1 */ @@ -237,6 +237,7 @@ enum ha_base_keytype { Only needed for internal temporary tables.
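One detail of the my_base.h hunk above is worth spelling out: HA_KEYTYPE_BIT had been declared with the value 18, which HA_KEYTYPE_VARBINARY2 already occupies, and C accepts duplicate enumerator values without any diagnostic, so the collision compiled silently while leaving the two key types indistinguishable at runtime. A tiny standalone illustration (enumerator names shortened for brevity):

#include <stdio.h>

/* Before the fix: two distinct key types silently shared the value 18. */
enum keytype_before { VARBINARY2_OLD= 18, BIT_OLD= 18 };

/* After the fix: BIT gets its own value. */
enum keytype_after { VARBINARY2_NEW= 18, BIT_NEW= 19 };

int main(void)
{
    printf("before: BIT == VARBINARY2 is %d\n", BIT_OLD == VARBINARY2_OLD);
    printf("after:  BIT == VARBINARY2 is %d\n", BIT_NEW == VARBINARY2_NEW);
    return 0;
}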
*/ #define HA_END_SPACE_ARE_EQUAL 512 +#define HA_BIT_PART 1024 /* optionbits for database */ #define HA_OPTION_PACK_RECORD 1 diff --git a/innobase/os/os0proc.c b/innobase/os/os0proc.c index 98254ae1055..dd2037695b7 100644 --- a/innobase/os/os0proc.c +++ b/innobase/os/os0proc.c @@ -565,7 +565,7 @@ os_mem_alloc_large( if (ptr) { if (set_to_zero) { #ifdef UNIV_SET_MEM_TO_ZERO - memset(ret, '\0', size); + memset(ptr, '\0', size); #endif } diff --git a/myisam/mi_dbug.c b/myisam/mi_dbug.c index 531d0b9ddba..e782d21afe7 100644 --- a/myisam/mi_dbug.c +++ b/myisam/mi_dbug.c @@ -131,6 +131,16 @@ void _mi_print_key(FILE *stream, register HA_KEYSEG *keyseg, key=end; break; } + case HA_KEYTYPE_BIT: + { + uint i; + fputs("0x",stream); + for (i=0 ; i < keyseg->length ; i++) + fprintf(stream, "%02x", (uint) *key++); + key= end; + break; + } + #endif case HA_KEYTYPE_VARTEXT1: /* VARCHAR and TEXT */ case HA_KEYTYPE_VARTEXT2: /* VARCHAR and TEXT */ diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index dab35262e0a..ec9bd33d301 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -314,57 +314,57 @@ insert into t2 values (1),(2); insert into t3 values (1,1),(2,2); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 2 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 2 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 2 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 2 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (3),(4); insert into t2 values (3),(4); insert into t3 values (3,3),(4,4); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 4 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 4 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 4 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 4 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 5 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 5 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 5 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 5 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL delete from t1 where a=3; delete from t2 where b=3; delete from t3 where a=3; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment 
Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 4 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 4 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 4 9 # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 4 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 4 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 4 # # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL delete from t1; delete from t2; delete from t3; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 0 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 0 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 0 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 0 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 1 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 1 5 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 1 9 # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 1 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL delete from t1 where a=5; delete from t2 where b=5; delete from t3 where a=5; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 HEAP 9 Fixed 0 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t2 HEAP 9 Fixed 0 5 # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL -t3 HEAP 9 Fixed 0 9 # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL +t1 HEAP 9 Fixed 0 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t2 HEAP 9 Fixed 0 # # # # 5 NULL NULL NULL NULL latin1_swedish_ci NULL +t3 HEAP 9 Fixed 0 # # # # 9 NULL NULL NULL NULL latin1_swedish_ci NULL drop table t1, t2, t3; create database mysqltest; show create database mysqltest; diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result index 41d84584870..45f887461e7 100644 --- a/mysql-test/r/type_bit.result +++ b/mysql-test/r/type_bit.result @@ -321,6 +321,12 @@ select a+0, b+0, c+0 from t1 where a = 4 and b = 0 limit 2; a+0 b+0 c+0 4 0 3 4 0 23 +select a+0, b+0, c+0 from t1 where a = 4 and b = 1; +a+0 b+0 c+0 +4 1 100 +select a+0, b+0, c+0 from t1 where a = 4 and b = 1 and c=100; +a+0 b+0 c+0 +4 1 100 select a+0, b+0, c+0 from t1 order by b desc; a+0 b+0 c+0 2 1 4 diff --git a/mysql-test/r/type_varchar.result b/mysql-test/r/type_varchar.result index 31fbe7b7b5d..1c2653bd225 100644 --- a/mysql-test/r/type_varchar.result +++ b/mysql-test/r/type_varchar.result @@ -68,3 +68,311 @@ create table t1 (v varbinary(20)); insert into t1 values('a'); insert into t1 values('a '); 
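Returning briefly to the mi_dbug.c hunk: the new HA_KEYTYPE_BIT branch prints a key segment as one 0x-prefixed hex string, two digits per byte. A standalone sketch of that loop; the two-byte demo key is made up (0x2c and 0x39 are the values 44 and 57 that the type_bit test inserts):

#include <stdio.h>

/* Dump a BIT key segment the way the new mi_dbug.c branch does:
   "0x" once, then two hex digits per byte. */
static void print_bit_key(FILE *stream, const unsigned char *key,
                          unsigned length)
{
    unsigned i;
    fputs("0x", stream);
    for (i= 0; i < length; i++)
        fprintf(stream, "%02x", (unsigned) key[i]);
}

int main(void)
{
    const unsigned char key[2]= { 0x2c, 0x39 };  /* 44 and 57 as key bytes */
    print_bit_key(stdout, key, 2);
    putchar('\n');
    return 0;
}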
alter table t1 add primary key (v); +drop table t1; +create table t1 (v varchar(254), index (v)); +insert into t1 values ("This is a test "); +insert into t1 values ("Some sample data"); +insert into t1 values (" garbage "); +insert into t1 values (" This is a test "); +insert into t1 values ("This is a test"); +insert into t1 values ("Hello world"); +insert into t1 values ("Foo bar"); +insert into t1 values ("This is a test"); +insert into t1 values ("MySQL varchar test"); +insert into t1 values ("test MySQL varchar"); +insert into t1 values ("This is a long string to have some random length data included"); +insert into t1 values ("Short string"); +insert into t1 values ("VSS"); +insert into t1 values ("Some samples"); +insert into t1 values ("Bar foo"); +insert into t1 values ("Bye"); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using index +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where; Using index +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using index +alter table t1 change v v varchar(255); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(256); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(257); +select * from t1 where v like 'This is a test' 
order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(258); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(259); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(258); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(257); 
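A note on the key_len of 257 repeated through these plans: it decomposes as 254 bytes of key data, plus the 2-byte length prefix that VARCHAR key segments always carry, plus 1 byte for the NULL indicator; the v(10) prefix key at the end of the file gives 13 by the same arithmetic (10 + 2 + 1). What the ladder of ALTERs actually varies is the row format, which switches from 1 to 2 length bytes at a declared maximum of 256 bytes. A small sketch of both calculations; the helper names are invented, and a single-byte charset and a nullable column are assumed:

#include <stdio.h>

#define HA_KEY_BLOB_LENGTH 2  /* length prefix on a VARCHAR/BLOB key part */

/* Length bytes in the row format: 1 while every length fits in one byte. */
static unsigned varchar_length_bytes(unsigned max_byte_length)
{
    return max_byte_length < 256 ? 1 : 2;
}

/* key_len as EXPLAIN reports it for a nullable VARCHAR key part. */
static unsigned varchar_key_len(unsigned key_part_length)
{
    return key_part_length + HA_KEY_BLOB_LENGTH + 1;  /* +1 for NULL flag */
}

int main(void)
{
    printf("varchar(254): %u length byte(s), key_len %u\n",
           varchar_length_bytes(254), varchar_key_len(254));
    printf("varchar(258): %u length byte(s)\n", varchar_length_bytes(258));
    printf("v(10) prefix key: key_len %u\n", varchar_key_len(10));
    return 0;
}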
+select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(256); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(255); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; Using filesort +alter table t1 change v v varchar(254); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 3 Using where; Using index +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 257 const 3 Using where; Using index +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 257 NULL 2 Using where; 
Using index +alter table t1 change v v varchar(253); +alter table t1 change v v varchar(254), drop key v; +alter table t1 change v v varchar(300), add key (v(10)); +select * from t1 where v like 'This is a test' order by v; +v +This is a test +This is a test +select * from t1 where v='This is a test' order by v; +v +This is a test +This is a test +This is a test +select * from t1 where v like 'S%' order by v; +v +Short string +Some sample data +Some samples +explain select * from t1 where v like 'This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 13 NULL 4 Using where; Using filesort +explain select * from t1 where v='This is a test' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref v v 13 const 4 Using where +explain select * from t1 where v like 'S%' order by v; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range v v 13 NULL 2 Using where; Using filesort +drop table t1; diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index b09d7240721..8680da9b31a 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -236,37 +236,37 @@ CREATE TABLE t3 ( insert into t1 values (1),(2); insert into t2 values (1),(2); insert into t3 values (1,1),(2,2); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; insert into t1 values (3),(4); insert into t2 values (3),(4); insert into t3 values (3,3),(4,4); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; delete from t1 where a=3; delete from t2 where b=3; delete from t3 where a=3; ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; delete from t1; delete from t2; delete from t3; ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; insert into t1 values (5); insert into t2 values (5); insert into t3 values (5,5); ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; delete from t1 where a=5; delete from t2 where b=5; delete from t3 where a=5; ---replace_column 7 # 8 # 9 # +--replace_column 6 # 7 # 8 # 9 # show table status; drop table t1, t2, t3; diff --git a/mysql-test/t/type_bit.test b/mysql-test/t/type_bit.test index 075fd5bce07..0c1c22099f9 100644 --- a/mysql-test/t/type_bit.test +++ b/mysql-test/t/type_bit.test @@ -88,6 +88,8 @@ select hex(min(b)) from t1 where a = 4; select hex(min(c)) from t1 where a = 4 and b = 0; select hex(max(b)) from t1; select a+0, b+0, c+0 from t1 where a = 4 and b = 0 limit 2; +select a+0, b+0, c+0 from t1 where a = 4 and b = 1; +select a+0, b+0, c+0 from t1 where a = 4 and b = 1 and c=100; select a+0, b+0, c+0 from t1 order by b desc; select a+0, b+0, c+0 from t1 order by c; drop table t1; @@ -101,6 +103,6 @@ drop table t1; # Some magic numbers create table t1 (a bit(7), key(a)); -insert into t1 values (44), (57); +insert into t1 values (44), (57); select a+0 from t1; drop table t1; diff --git a/mysql-test/t/type_varchar.test b/mysql-test/t/type_varchar.test index f6e9bb24087..0168128d513 100644 --- a/mysql-test/t/type_varchar.test +++ b/mysql-test/t/type_varchar.test @@ -32,3 +32,68 @@ create table t1 (v varbinary(20)); insert into t1 values('a'); insert into t1 values('a '); alter table t1 add primary key (v); +drop 
table t1; + +# +# Test with varchar of lengths 254,255,256,257 & 258 to ensure we don't +# have any problems with varchar with one or two byte length_bytes +# + +create table t1 (v varchar(254), index (v)); +insert into t1 values ("This is a test "); +insert into t1 values ("Some sample data"); +insert into t1 values (" garbage "); +insert into t1 values (" This is a test "); +insert into t1 values ("This is a test"); +insert into t1 values ("Hello world"); +insert into t1 values ("Foo bar"); +insert into t1 values ("This is a test"); +insert into t1 values ("MySQL varchar test"); +insert into t1 values ("test MySQL varchar"); +insert into t1 values ("This is a long string to have some random length data included"); +insert into t1 values ("Short string"); +insert into t1 values ("VSS"); +insert into t1 values ("Some samples"); +insert into t1 values ("Bar foo"); +insert into t1 values ("Bye"); +let $i= 255; +let $j= 5; +while ($j) +{ + select * from t1 where v like 'This is a test' order by v; + select * from t1 where v='This is a test' order by v; + select * from t1 where v like 'S%' order by v; + explain select * from t1 where v like 'This is a test' order by v; + explain select * from t1 where v='This is a test' order by v; + explain select * from t1 where v like 'S%' order by v; + eval alter table t1 change v v varchar($i); + inc $i; + dec $j; +} +let $i= 258; +let $j= 6; +while ($j) +{ + select * from t1 where v like 'This is a test' order by v; + select * from t1 where v='This is a test' order by v; + select * from t1 where v like 'S%' order by v; + explain select * from t1 where v like 'This is a test' order by v; + explain select * from t1 where v='This is a test' order by v; + explain select * from t1 where v like 'S%' order by v; + eval alter table t1 change v v varchar($i); + dec $i; + dec $j; +} +alter table t1 change v v varchar(254), drop key v; + +# Test with length(varchar) > 256 and key < 256 (to ensure things work with +# different kinds of packing) + +alter table t1 change v v varchar(300), add key (v(10)); +select * from t1 where v like 'This is a test' order by v; +select * from t1 where v='This is a test' order by v; +select * from t1 where v like 'S%' order by v; +explain select * from t1 where v like 'This is a test' order by v; +explain select * from t1 where v='This is a test' order by v; +explain select * from t1 where v like 'S%' order by v; +drop table t1; diff --git a/sql/field.cc b/sql/field.cc index ee12ce5ea68..ebeee476985 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -329,6 +329,27 @@ bool Field::field_cast_compatible(Field::field_cast_enum type) } + +/* + Interpret field value as an integer but return the result as a string. + + This is used for printing bit_fields as numbers while debugging +*/ + +String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_flag) +{ + CHARSET_INFO *cs= &my_charset_bin; + uint length= 21; + longlong value= val_int(); + if (val_buffer->alloc(length)) + return 0; + length= (uint) cs->cset->longlong10_to_str(cs, (char*) val_buffer->ptr(), + length, unsigned_flag ? 10 : -10, + value); + val_buffer->length(length); + return val_buffer; +} + + /**************************************************************************** ** Functions for the base classes ** This is an unpacked number.
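The val_int_as_str() helper above leans on a convention of the charset library: longlong10_to_str() takes a radix of 10 for unsigned and -10 for signed formatting, and 21 bytes is exactly enough for any 64-bit value (at most 20 digits, or 19 digits plus a sign, and a terminator). A standalone approximation of that convention, with snprintf standing in for the charset hook:

#include <stdio.h>

/* Mimic longlong10_to_str(): radix 10 means unsigned, -10 means signed.
   Returns the length of the resulting string. */
static unsigned ll10_to_str(char *buf, unsigned buflen, int radix,
                            long long value)
{
    int n= (radix < 0)
        ? snprintf(buf, buflen, "%lld", value)
        : snprintf(buf, buflen, "%llu", (unsigned long long) value);
    return (unsigned) n;
}

int main(void)
{
    char buf[21];                        /* enough for any 64-bit value */
    ll10_to_str(buf, sizeof buf, -10, -5);
    printf("signed view:   %s\n", buf);  /* -5 */
    ll10_to_str(buf, sizeof buf, 10, -5);
    printf("unsigned view: %s\n", buf);  /* 18446744073709551611 */
    return 0;
}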
@@ -500,6 +521,22 @@ Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table) return tmp; } + +Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field *tmp; + if ((tmp= new_field(root, new_table))) + { + tmp->ptr= new_ptr; + tmp->null_ptr= new_null_ptr; + tmp->null_bit= new_null_bit; + } + return tmp; +} + + /**************************************************************************** Field_null, a field that always returns NULL ****************************************************************************/ @@ -5116,12 +5153,20 @@ Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table) Field *Field_varstring::new_key_field(MEM_ROOT *root, - struct st_table *new_table) + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) { - Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); - /* Keys length prefixes are always packed with 2 bytes */ - if (res) + Field_varstring *res; + if ((res= (Field_varstring*) Field::new_key_field(root, + new_table, + new_ptr, + new_null_ptr, + new_null_bit))) + { + /* Key length prefixes are always packed with 2 bytes */ res->length_bytes= 2; + } return res; } @@ -6210,6 +6255,43 @@ bool Field_num::eq_def(Field *field) 11 one byte for 'd' */ +Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg) + : Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg), + bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7) +{ + /* + Ensure that Field::eq() can distinguish between two different bit fields. + (two bit fields that are not null may have the same ptr and null_ptr) + */ + if (!null_ptr_arg) + null_bit= bit_ofs_arg; +} + + +Field *Field_bit::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_bit *res; + if ((res= (Field_bit*) Field::new_key_field(root, new_table, + new_ptr, new_null_ptr, + new_null_bit))) + { + /* Move bits normally stored in null_pointer to new_ptr */ + res->bit_ptr= (uchar*) new_ptr; + res->bit_ofs= 0; + if (bit_len) + res->ptr++; // Store rest of data here + } + return res; +} + + void Field_bit::make_field(Send_field *field) { /* table_cache_key is not set for temp tables */ @@ -6331,7 +6413,7 @@ int Field_bit::key_cmp(const byte *str, uint length) { int flag; uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); - if ((flag= (int) (bits - *str))) + if ((flag= (int) (bits - *(uchar*) str))) return flag; str++; length--; @@ -6426,6 +6508,11 @@ void create_field::create_length_to_internal_length(void) length*= charset->mbmaxlen; key_length= pack_length; break; + case MYSQL_TYPE_BIT: + pack_length= calc_pack_length(sql_type, length); + /* We need one extra byte to store the bits we save among the null bits */ + key_length= pack_length+ test(length & 7); + break; default: key_length= pack_length= calc_pack_length(sql_type, length); break; diff --git a/sql/field.h b/sql/field.h index 2d84d240839..6ce5cf2a526 100644 --- a/sql/field.h +++ b/sql/field.h @@ -113,9 +113,14 @@ public: This trickery is used to decrease a number of malloc calls.
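The Field_bit constructor above encodes the storage split for a BIT(n) column: the n >> 3 whole bytes live in the record proper, while the remaining n & 7 bits are kept among the null bits, which is why create_length_to_internal_length() reserves one extra key byte whenever n is not a multiple of eight. A quick sketch of that bookkeeping; bit_test() stands in for MySQL's test() macro:

#include <stdio.h>

#define bit_test(x) ((x) ? 1 : 0)  /* stand-in for MySQL's test() macro */

int main(void)
{
    unsigned widths[3]= { 1, 8, 15 };
    unsigned i;
    for (i= 0; i < 3; i++)
    {
        unsigned n= widths[i];
        unsigned byte_part= n >> 3;  /* bytes stored in the record */
        unsigned bit_part= n & 7;    /* bits stored among the null bits */
        printf("BIT(%2u): %u byte(s) + %u bit(s), key image %u byte(s)\n",
               n, byte_part, bit_part, byte_part + bit_test(bit_part));
    }
    return 0;
}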
*/ virtual String *val_str(String*,String *)=0; + String *val_int_as_str(String *val_buffer, my_bool unsigned_flag); virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } - bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; } + bool eq(Field *field) + { + return (ptr == field->ptr && null_ptr == field->null_ptr && + null_bit == field->null_bit); + } virtual bool eq_def(Field *field); virtual uint32 pack_length() const { return (uint32) field_length; } virtual void reset(void) { bzero(ptr,pack_length()); } @@ -184,11 +189,10 @@ public: virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); - virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table) - { - return new_field(root, new_table); - } - inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) + virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); + virtual void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) { ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg; } @@ -994,7 +998,9 @@ public: { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_VARSTRING; } Field *new_field(MEM_ROOT *root, struct st_table *new_table); - Field *new_key_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); }; @@ -1199,11 +1205,7 @@ public: Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, enum utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg) - : Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg), - bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7) - { } + struct st_table *table_arg); enum_field_types type() const { return FIELD_TYPE_BIT; } enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; } uint32 key_length() const { return (uint32) field_length + (bit_len > 0); } @@ -1235,6 +1237,9 @@ public: field_cast_enum field_cast_type() { return FIELD_CAST_BIT; } char *pack(char *to, const char *from, uint max_length=~(uint) 0); const char *unpack(char* to, const char *from); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); }; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index ce3287d8745..c23a728b715 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1409,7 +1409,8 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].type= (int) type; keydef[i].seg[j].start= pos->key_part[j].offset; keydef[i].seg[j].length= pos->key_part[j].length; - keydef[i].seg[j].bit_start=keydef[i].seg[j].bit_end=0; + keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= + keydef[i].seg[j].bit_pos= 0; keydef[i].seg[j].language = field->charset()->number; if (field->null_ptr) diff --git a/sql/key.cc b/sql/key.cc index fe35638608d..d54b8721cab 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -250,54 +250,54 @@ void key_restore(byte *to_record, byte *from_key, KEY *key_info, bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length) { - uint length; + uint store_length;
KEY_PART_INFO *key_part; + const byte *key_end= key + key_length; for (key_part=table->key_info[idx].key_part; - (int) key_length > 0; - key_part++, key+=length, key_length-=length) + key < key_end ; + key_part++, key+= store_length) { + uint length; + store_length= key_part->store_length; + if (key_part->null_bit) { - key_length--; if (*key != test(table->record[0][key_part->null_offset] & key_part->null_bit)) return 1; if (*key) - { - length=key_part->store_length; continue; - } key++; + store_length--; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART | + HA_BIT_PART)) { if (key_part->field->key_cmp(key, key_part->length)) return 1; - length=key_part->length+HA_KEY_BLOB_LENGTH; + continue; } - else + length= min((uint) (key_end-key), store_length); + if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ + FIELDFLAG_PACK))) { - length=min(key_length,key_part->length); - if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ - FIELDFLAG_PACK))) + CHARSET_INFO *cs= key_part->field->charset(); + uint char_length= key_part->length / cs->mbmaxlen; + const byte *pos= table->record[0] + key_part->offset; + if (length > char_length) { - CHARSET_INFO *cs= key_part->field->charset(); - uint char_length= key_part->length / cs->mbmaxlen; - const byte *pos= table->record[0] + key_part->offset; - if (length > char_length) - { - char_length= my_charpos(cs, pos, pos + length, char_length); - set_if_smaller(char_length, length); - } - if (cs->coll->strnncollsp(cs, - (const uchar*) key, length, - (const uchar*) pos, char_length, 0)) - return 1; + char_length= my_charpos(cs, pos, pos + length, char_length); + set_if_smaller(char_length, length); } - else if (memcmp(key,table->record[0]+key_part->offset,length)) - return 1; + if (cs->coll->strnncollsp(cs, + (const uchar*) key, length, + (const uchar*) pos, char_length, 0)) + return 1; + continue; } + if (memcmp(key,table->record[0]+key_part->offset,length)) + return 1; } return 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 0f54f06a22b..80237766d29 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -5000,7 +5000,9 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, uint max_key_flag) { - ha_rows records=0,tmp; + ha_rows records=0, tmp; + uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length; + char *tmp_min_key, *tmp_max_key; param->max_key_part=max(param->max_key_part,key_tree->part); if (key_tree->left != &null_element) @@ -5018,13 +5020,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, return records; } - uint tmp_min_flag,tmp_max_flag,keynr; - char *tmp_min_key=min_key,*tmp_max_key=max_key; - + tmp_min_key= min_key; + tmp_max_key= max_key; key_tree->store(param->key[idx][key_tree->part].store_length, &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); - uint min_key_length= (uint) (tmp_min_key- param->min_key); - uint max_key_length= (uint) (tmp_max_key- param->max_key); + min_key_length= (uint) (tmp_min_key- param->min_key); + max_key_length= (uint) (tmp_max_key- param->max_key); if (param->is_ror_scan) { @@ -8448,7 +8449,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) store_length--; } field->set_key_image((char*) key, key_part->length); - field->val_str(&tmp); + if (field->type() == MYSQL_TYPE_BIT) + (void) field->val_int_as_str(&tmp, 1); + else + field->val_str(&tmp);
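// The rewritten key_cmp_if_same() above clamps the compared byte length with
// my_charpos(): in a multi-byte charset a prefix of N characters may occupy
// anywhere from N to N*mbmaxlen bytes. A rough stand-in for that character
// walk, assuming valid UTF-8 input (the real code goes through CHARSET_INFO):
#include <cassert>
#include <cstddef>
static size_t utf8_charpos(const unsigned char* s, size_t byte_len,
                           size_t nchars)
{
  size_t i = 0;
  while (nchars-- > 0 && i < byte_len) {
    unsigned char c = s[i];                 // lead byte gives sequence length
    i += (c < 0x80) ? 1 : (c < 0xE0) ? 2 : (c < 0xF0) ? 3 : 4;
  }
  return i < byte_len ? i : byte_len;       // set_if_smaller(char_length, length)
}
int main() {
  const unsigned char s[] = "h\xC3\xA9llo"; // 'h' + 2-byte U+00E9 + "llo"
  assert(utf8_charpos(s, 6, 2) == 3);       // first 2 characters span 3 bytes
  return 0;
}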
fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE); if (key+store_length < key_end) fputc('/',DBUG_FILE); diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6d59d465445..8afefe3cae8 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2041,10 +2041,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, thd->cleanup_after_query(); if (stmt->state == Item_arena::PREPARED) - { - thd->current_arena= thd; stmt->state= Item_arena::EXECUTED; - } DBUG_VOID_RETURN; } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 5425ddfb64b..44412cdc43a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8159,25 +8159,27 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, if (!using_unique_constraint) { group->buff=(char*) group_buff; - if (!(group->field=field->new_key_field(thd->mem_root,table))) + if (!(group->field= field->new_key_field(thd->mem_root,table, + (char*) group_buff + + test(maybe_null), + field->null_ptr, + field->null_bit))) goto err; /* purecov: inspected */ if (maybe_null) { /* - To be able to group on NULL, we reserve place in group_buff - for the NULL flag just before the column. + To be able to group on NULL, we reserved a place in group_buff + for the NULL flag just before the column (see above). The field data is after this flag. - The NULL flag is updated by 'end_update()' and 'end_write()' + The NULL flag is updated in 'end_update()' and 'end_write()' */ keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. that NULL == NULL key_part_info->null_bit=field->null_bit; key_part_info->null_offset= (uint) (field->null_ptr - (uchar*) table->record[0]); - group->field->move_field((char*) ++group->buff); - group_buff++; + group->buff++; // Pointer to field data + group_buff++; // Skip null flag } - else - group->field->move_field((char*) group_buff); /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */ key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; group_buff+= group->field->pack_length(); diff --git a/sql/sql_select.h b/sql/sql_select.h index be3a72836b4..0f26207b391 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -427,11 +427,8 @@ class store_key :public Sql_alloc field_arg->table, field_arg->charset()); } else - { - to_field=field_arg->new_key_field(thd->mem_root, field_arg->table); - if (to_field) - to_field->move_field(ptr, (uchar*) null, 1); - } + to_field=field_arg->new_key_field(thd->mem_root, field_arg->table, + ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ virtual bool copy()=0; diff --git a/sql/structs.h b/sql/structs.h index 5d0c7bc4f1f..0b59c3abeb3 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -74,7 +74,7 @@ typedef struct st_key_part_info { /* Info about a key part */ uint16 store_length; uint16 key_type; uint16 fieldnr; /* Fieldnum in UNIREG */ - uint8 key_part_flag; /* 0 or HA_REVERSE_SORT */ + uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */ uint8 type; uint8 null_bit; /* Position to null_bit */ } KEY_PART_INFO ; diff --git a/sql/table.cc b/sql/table.cc index 610de9e4e9b..c18a2557337 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -631,6 +631,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (!(field->flags & BINARY_FLAG)) keyinfo->flags|= HA_END_SPACE_KEY; } + if (field->type() == MYSQL_TYPE_BIT) + key_part->key_part_flag|= HA_BIT_PART; + if (i == 0 && key != primary_key) field->flags |= ((keyinfo->flags & HA_NOSAME) && From fb69c85183fa51a3a7ed95297c2b9f8a98ba6b48 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Dec 2004 21:25:11 +0200
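// Layout note for the create_tmp_table() hunk above, as a tiny model
// (hypothetical names): when the GROUP BY column is nullable, one flag byte
// sits in group_buff directly before the field data, and new_key_field() is
// handed the address just past that flag.
struct group_key_part_model {
  bool maybe_null;                          // column can be NULL
  unsigned pack_length;                     // bytes of column data
  unsigned data_offset() const              // where the field itself lives
  { return maybe_null ? 1 : 0; }            // skip the NULL flag byte
  unsigned store_length() const             // total space for this key part
  { return data_offset() + pack_length; }
};
// e.g. a nullable 4-byte column: flag at offset 0, data at offsets 1..4,
// and the next key part starts 5 bytes further on.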
Subject: [PATCH 10/21] Changed mode of result files to -rw-rw-r-- mysql-test/r/federated.result: Change mode to -rw-rw-r-- mysql-test/r/fulltext2.result: Change mode to -rw-rw-r-- mysql-test/r/grant2.result: Change mode to -rw-rw-r-- mysql-test/r/information_schema_inno.result: Change mode to -rw-rw-r-- mysql-test/r/innodb_handler.result: Change mode to -rw-rw-r-- mysql-test/r/insert_update.result: Change mode to -rw-rw-r-- mysql-test/r/repair.result: Change mode to -rw-rw-r-- mysql-test/r/rpl_create_database.result: Change mode to -rw-rw-r-- mysql-test/r/type_bit.result: Change mode to -rw-rw-r-- From c3a4fa1f40e407926f56c690875b94f23fbd4922 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Dec 2004 16:22:58 +0100 Subject: [PATCH 11/21] ndb - Add partitioning tests to autotest ndb/test/run-test/daily-basic-tests.txt: Add partitioning tests to autotest --- ndb/test/run-test/daily-basic-tests.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 6bb91dbb986..837d0ee195f 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -485,6 +485,10 @@ max-time: 2500 cmd: testBitfield args: +max-time: 2500 +cmd: testPartitioning +args: + # # # SYSTEM RESTARTS From 961f81c3ebdca8325aa328e541c8bf8e9392aba6 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Dec 2004 16:25:45 +0100 Subject: [PATCH 12/21] ndb - More printout from testDict --- ndb/test/ndbapi/testDict.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index 0a43bb02fff..9a33601c85a 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -547,6 +547,7 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){ if (newTab.createTableInDb(pNdb) != 0){ ndbout << newTab.getName() << " could not be created" << ", fragmentType = " << fragTtype << endl; + ndbout << pNdb->getDictionary()->getNdbError() << endl; return NDBT_FAILED; } From eef88fade1a8d882eaf1814e8743df5a3a03fb9a Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Dec 2004 16:52:14 +0100 Subject: [PATCH 13/21] ndb: index tests ndb/test/ndbapi/testOIBasic.cpp: test ordering, add hash indexes, fix result verification mysql-test/ndb/ndb_range_bounds.pl: minor mysql-test/r/ndb_index_ordered.result: minor mysql-test/t/ndb_index_ordered.test: minor --- mysql-test/ndb/ndb_range_bounds.pl | 6 +- mysql-test/r/ndb_index_ordered.result | 1 + mysql-test/t/ndb_index_ordered.test | 2 + ndb/test/ndbapi/testOIBasic.cpp | 1219 +++++++++++++++++++------ 4 files changed, 962 insertions(+), 266 deletions(-) diff --git a/mysql-test/ndb/ndb_range_bounds.pl b/mysql-test/ndb/ndb_range_bounds.pl index 3b1844495b3..abe1ea28298 100644 --- a/mysql-test/ndb/ndb_range_bounds.pl +++ b/mysql-test/ndb/ndb_range_bounds.pl @@ -1,6 +1,7 @@ # # test range scan bounds # give option --all to test all cases +# set MYSQL_HOME to installation top # use strict; @@ -14,8 +15,9 @@ my $opt_verbose = 0; GetOptions("all" => \$opt_all, "cnt=i" => \$opt_cnt, "verbose" => \$opt_verbose) or die "options are: --all --cnt=N --verbose"; -my $mysql_top = $ENV{MYSQL_TOP}; -my $dsn = "dbi:mysql:database=test;host=localhost;mysql_read_default_file=$mysql_top/.target/var/my.cnf"; +my $mysql_home = $ENV{MYSQL_HOME}; +defined($mysql_home) or die "no MYSQL_HOME"; +my $dsn = "dbi:mysql:database=test;host=localhost;mysql_read_default_file=$mysql_home/var/my.cnf"; my $opts = { RaiseError => 0, PrintError => 0, AutoCommit => 1, }; my $dbh; diff --git 
a/mysql-test/r/ndb_index_ordered.result b/mysql-test/r/ndb_index_ordered.result index 00e3fbc5827..1cf2a97a6b3 100644 --- a/mysql-test/r/ndb_index_ordered.result +++ b/mysql-test/r/ndb_index_ordered.result @@ -383,6 +383,7 @@ b c select min(b), max(b) from t1; min(b) max(b) 1 5000000 +drop table t1; CREATE TABLE test1 ( SubscrID int(11) NOT NULL auto_increment, UsrID int(11) NOT NULL default '0', diff --git a/mysql-test/t/ndb_index_ordered.test b/mysql-test/t/ndb_index_ordered.test index 783ce99e739..42325e25ea3 100644 --- a/mysql-test/t/ndb_index_ordered.test +++ b/mysql-test/t/ndb_index_ordered.test @@ -175,6 +175,8 @@ select b, c from t1 where 1000<=b and b<=100000 and c<'j' order by b, c; select b, c from t1 where 1000<=b and b<=100000 and c<'j' order by b desc, c desc; # select min(b), max(b) from t1; +# +drop table t1; # # Bug #6435 diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index 7a73ba872b2..e3cc3d0d90c 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -72,7 +72,7 @@ struct Opt { m_die(0), m_dups(false), m_fragtype(NdbDictionary::Object::FragUndefined), - m_subsubloop(4), + m_subsubloop(2), m_index(0), m_loop(1), m_msglock(true), @@ -257,6 +257,9 @@ struct Par : public Opt { bool m_verify; // deadlock possible bool m_deadlock; + // ordered range scan + bool m_ordered; + bool m_descending; // timer location Par(const Opt& opt) : Opt(opt), @@ -274,7 +277,9 @@ struct Par : public Opt { m_bdir(0), m_randomkey(false), m_verify(false), - m_deadlock(false) { + m_deadlock(false), + m_ordered(false), + m_descending(false) { } }; @@ -444,6 +449,9 @@ struct Chs { ~Chs(); }; +static NdbOut& +operator<<(NdbOut& out, const Chs& chs); + Chs::Chs(CHARSET_INFO* cs) : m_cs(cs) { @@ -455,10 +463,14 @@ Chs::Chs(CHARSET_INFO* cs) : unsigned i = 0; unsigned miss1 = 0; unsigned miss2 = 0; + unsigned miss3 = 0; + unsigned miss4 = 0; while (i < maxcharcount) { unsigned char* bytes = m_chr[i].m_bytes; unsigned char* xbytes = m_chr[i].m_xbytes; - unsigned size = m_cs->mbminlen + urandom(m_cs->mbmaxlen - m_cs->mbminlen + 1); + unsigned& size = m_chr[i].m_size; + bool ok; + size = m_cs->mbminlen + urandom(m_cs->mbmaxlen - m_cs->mbminlen + 1); assert(m_cs->mbminlen <= size && size <= m_cs->mbmaxlen); // prefer longer chars if (size == m_cs->mbminlen && m_cs->mbminlen < m_cs->mbmaxlen && urandom(5) != 0) @@ -466,33 +478,57 @@ Chs::Chs(CHARSET_INFO* cs) : for (unsigned j = 0; j < size; j++) { bytes[j] = urandom(256); } + // check wellformed const char* sbytes = (const char*)bytes; if ((*cs->cset->well_formed_len)(cs, sbytes, sbytes + size, 1) != size) { miss1++; continue; } - // do not trust well_formed_len currently + // check no proper prefix wellformed + ok = true; + for (unsigned j = 1; j < size; j++) { + if ((*cs->cset->well_formed_len)(cs, sbytes, sbytes + j, 1) == j) { + ok = false; + break; + } + } + if (! ok) { + miss2++; + continue; + } + // normalize memset(xbytes, 0, sizeof(xbytes)); // currently returns buffer size always int xlen = (*cs->coll->strnxfrm)(cs, xbytes, m_xmul * size, bytes, size); // check we got something - bool xok = false; + ok = false; for (unsigned j = 0; j < xlen; j++) { if (xbytes[j] != 0) { - xok = true; + ok = true; break; } } - if (! xok) { - miss2++; + if (! 
ok) { + miss3++; + continue; + } + // check for duplicate (before normalize) + ok = true; + for (unsigned j = 0; j < i; j++) { + const Chr& chr = m_chr[j]; + if (chr.m_size == size && memcmp(chr.m_bytes, bytes, size) == 0) { + ok = false; + break; + } + } + if (! ok) { + miss4++; continue; } - // occasional duplicate char is ok - m_chr[i].m_size = size; i++; } bool disorder = true; - unsigned bubbels = 0; + unsigned bubbles = 0; while (disorder) { disorder = false; for (unsigned i = 1; i < maxcharcount; i++) { @@ -502,11 +538,11 @@ Chs::Chs(CHARSET_INFO* cs) : m_chr[i] = m_chr[i-1]; m_chr[i-1] = chr; disorder = true; - bubbels++; + bubbles++; } } } - LL3("inited charset " << cs->name << " miss1=" << miss1 << " miss2=" << miss2 << " bubbels=" << bubbels); + LL3("inited charset " << *this << " miss=" << miss1 << "," << miss2 << "," << miss3 << "," << miss4 << " bubbles=" << bubbles); } Chs::~Chs() @@ -514,6 +550,14 @@ Chs::~Chs() delete [] m_chr; } +static NdbOut& +operator<<(NdbOut& out, const Chs& chs) +{ + CHARSET_INFO* cs = chs.m_cs; + out << cs->name << "[" << cs->mbminlen << "-" << cs->mbmaxlen << "," << chs.m_xmul << "]"; + return out; +} + static Chs* cslist[maxcsnumber]; static void @@ -552,22 +596,26 @@ getcs(Par par) // Col - table column struct Col { + enum Type { + Unsigned = NdbDictionary::Column::Unsigned, + Char = NdbDictionary::Column::Char + }; const class Tab& m_tab; unsigned m_num; const char* m_name; bool m_pk; - NdbDictionary::Column::Type m_type; + Type m_type; unsigned m_length; unsigned m_bytelength; bool m_nullable; const Chs* m_chs; - Col(const class Tab& tab, unsigned num, const char* name, bool pk, NdbDictionary::Column::Type type, unsigned length, bool nullable, const Chs* chs); + Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs); ~Col(); bool equal(const Col& col2) const; void verify(const void* addr) const; }; -Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, NdbDictionary::Column::Type type, unsigned length, bool nullable, const Chs* chs) : +Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs) : m_tab(tab), m_num(num), m_name(strcpy(new char [strlen(name) + 1], name)), @@ -595,9 +643,9 @@ void Col::verify(const void* addr) const { switch (m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: break; - case NdbDictionary::Column::Char: + case Col::Char: { CHARSET_INFO* cs = m_chs->m_cs; const char* src = (const char*)addr; @@ -616,10 +664,10 @@ operator<<(NdbOut& out, const Col& col) { out << "col[" << col.m_num << "] " << col.m_name; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: out << " unsigned"; break; - case NdbDictionary::Column::Char: + case Col::Char: { CHARSET_INFO* cs = col.m_chs->m_cs; out << " char(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")"; @@ -656,25 +704,41 @@ ICol::~ICol() { } +static NdbOut& +operator<<(NdbOut& out, const ICol& icol) +{ + out << "icol[" << icol.m_num << "] " << icol.m_col; + return out; +} + // ITab - index struct ITab { + enum Type { + OrderedIndex = NdbDictionary::Index::OrderedIndex, + UniqueHashIndex = NdbDictionary::Index::UniqueHashIndex + }; const class Tab& m_tab; const char* m_name; + Type m_type; unsigned m_icols; const ICol** m_icol; - ITab(const class Tab& tab, const char* name, unsigned icols); + unsigned m_colmask; + ITab(const class Tab& tab, const char* name, 
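// Skeleton of the accept/reject loop in the reworked Chs constructor above
// (sketch only: the real checks go through CHARSET_INFO; here
// is_well_formed() is a trivial stand-in, and the normalization and
// duplicate passes — miss3/miss4 — are left to the caller).
#include <cstdlib>
static bool is_well_formed(const unsigned char* b, unsigned len)
{ return len == 1 && b[0] < 0x80; }        // stub: accept single ASCII bytes

static unsigned make_char(unsigned char* out, unsigned maxlen)
{
  for (;;) {
    unsigned size = 1 + std::rand() % maxlen;
    for (unsigned j = 0; j < size; j++)
      out[j] = (unsigned char)(std::rand() % 256);
    if (!is_well_formed(out, size))        // miss1: not a valid character
      continue;
    bool prefix_valid = false;             // miss2: no proper prefix may be
    for (unsigned j = 1; j < size; j++)    //        a valid character itself
      if (is_well_formed(out, j)) { prefix_valid = true; break; }
    if (prefix_valid)
      continue;
    return size;                           // normalization / dup check follow
  }
}
int main() { unsigned char buf[4]; return make_char(buf, 4) == 1 ? 0 : 1; }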
Type type, unsigned icols); ~ITab(); + void icoladd(unsigned k, const ICol* icolptr); }; -ITab::ITab(const class Tab& tab, const char* name, unsigned icols) : +ITab::ITab(const class Tab& tab, const char* name, Type type, unsigned icols) : m_tab(tab), m_name(strcpy(new char [strlen(name) + 1], name)), + m_type(type), m_icols(icols), - m_icol(new const ICol* [icols + 1]) + m_icol(new const ICol* [icols + 1]), + m_colmask(0) { - for (unsigned i = 0; i <= m_icols; i++) - m_icol[0] = 0; + for (unsigned k = 0; k <= m_icols; k++) + m_icol[k] = 0; } ITab::~ITab() @@ -685,13 +749,21 @@ ITab::~ITab() delete [] m_icol; } +void +ITab::icoladd(unsigned k, const ICol* icolptr) +{ + assert(k == icolptr->m_num && k < m_icols && m_icol[k] == 0); + m_icol[k] = icolptr; + m_colmask |= (1 << icolptr->m_col.m_num); +} + static NdbOut& operator<<(NdbOut& out, const ITab& itab) { out << "itab " << itab.m_name << " icols=" << itab.m_icols; for (unsigned k = 0; k < itab.m_icols; k++) { - out << endl; - out << "icol[" << k << "] " << itab.m_icol[k]->m_col; + const ICol& icol = *itab.m_icol[k]; + out << endl << icol; } return out; } @@ -706,6 +778,8 @@ struct Tab { const ITab** m_itab; // pk must contain an Unsigned column unsigned m_keycol; + void coladd(unsigned k, Col* colptr); + void itabadd(unsigned j, ITab* itab); Tab(const char* name, unsigned cols, unsigned itabs, unsigned keycol); ~Tab(); }; @@ -718,10 +792,10 @@ Tab::Tab(const char* name, unsigned cols, unsigned itabs, unsigned keycol) : m_itab(new const ITab* [itabs + 1]), m_keycol(keycol) { - for (unsigned i = 0; i <= cols; i++) - m_col[i] = 0; - for (unsigned i = 0; i <= itabs; i++) - m_itab[i] = 0; + for (unsigned k = 0; k <= cols; k++) + m_col[k] = 0; + for (unsigned j = 0; j <= itabs; j++) + m_itab[j] = 0; } Tab::~Tab() @@ -735,19 +809,33 @@ Tab::~Tab() delete [] m_itab; } +void +Tab::coladd(unsigned k, Col* colptr) +{ + assert(k == colptr->m_num && k < m_cols && m_col[k] == 0); + m_col[k] = colptr; +} + +void +Tab::itabadd(unsigned j, ITab* itabptr) +{ + assert(j < m_itabs && m_itab[j] == 0); + m_itab[j] = itabptr; +} + static NdbOut& operator<<(NdbOut& out, const Tab& tab) { out << "tab " << tab.m_name << " cols=" << tab.m_cols; for (unsigned k = 0; k < tab.m_cols; k++) { - out << endl; - out << *tab.m_col[k]; + const Col& col = *tab.m_col[k]; + out << endl << col; } for (unsigned i = 0; i < tab.m_itabs; i++) { if (tab.m_itab[i] == 0) continue; - out << endl; - out << *tab.m_itab[i]; + const ITab& itab = *tab.m_itab[i]; + out << endl << itab; } return out; } @@ -774,7 +862,7 @@ verifytables() { assert(t->m_keycol < t->m_cols); const Col* c = t->m_col[t->m_keycol]; - assert(c->m_pk && c->m_type == NdbDictionary::Column::Unsigned); + assert(c->m_pk && c->m_type == Col::Unsigned); } assert(t->m_itabs != 0 && t->m_itab != 0); for (unsigned i = 0; i < t->m_itabs; i++) { @@ -785,6 +873,9 @@ verifytables() for (unsigned k = 0; k < x->m_icols; k++) { const ICol* c = x->m_icol[k]; assert(c != 0 && c->m_num == k && c->m_col.m_num < t->m_cols); + if (x->m_type == ITab::UniqueHashIndex) { + assert(! 
c->m_col.m_nullable); + } } } assert(t->m_itab[t->m_itabs] == 0); @@ -810,127 +901,186 @@ makebuiltintables(Par par) } // ti0 - basic if (usetable(par, 0)) { - const Tab* t = new Tab("ti0", 5, 5, 0); + Tab* t = new Tab("ti0", 5, 7, 0); // name - pk - type - length - nullable - cs - t->m_col[0] = new Col(*t, 0, "a", 1, NdbDictionary::Column::Unsigned, 1, 0, 0); - t->m_col[1] = new Col(*t, 1, "b", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[2] = new Col(*t, 2, "c", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[3] = new Col(*t, 3, "d", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[4] = new Col(*t, 4, "e", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); + t->coladd(0, new Col(*t, 0, "a", 1, Col::Unsigned, 1, 0, 0)); + t->coladd(1, new Col(*t, 1, "b", 0, Col::Unsigned, 1, 1, 0)); + t->coladd(2, new Col(*t, 2, "c", 0, Col::Unsigned, 1, 0, 0)); + t->coladd(3, new Col(*t, 3, "d", 0, Col::Unsigned, 1, 1, 0)); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Unsigned, 1, 0, 0)); if (useindex(par, 0)) { // a - const ITab* x = t->m_itab[0] = new ITab(*t, "ti0x0", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); + ITab* x = new ITab(*t, "ti0x0", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + t->itabadd(0, x); } if (useindex(par, 1)) { // b - const ITab* x = t->m_itab[1] = new ITab(*t, "ti0x1", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); + ITab* x = new ITab(*t, "ti0x1", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + t->itabadd(1, x); } if (useindex(par, 2)) { // b, c - const ITab* x = t->m_itab[2] = new ITab(*t, "ti0x2", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); + ITab* x = new ITab(*t, "ti0x2", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(2, x); } if (useindex(par, 3)) { // d, c, b - const ITab* x = t->m_itab[3] = new ITab(*t, "ti0x3", 3); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[3]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[1]); + ITab* x = new ITab(*t, "ti0x3", ITab::OrderedIndex, 3); + x->icoladd(0, new ICol(*x, 0, *t->m_col[3])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[1])); + t->itabadd(3, x); } if (useindex(par, 4)) { // b, e, c, d - const ITab* x = t->m_itab[4] = new ITab(*t, "ti0x4", 4); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[4]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[2]); - x->m_icol[3] = new ICol(*x, 3, *t->m_col[3]); + ITab* x = new ITab(*t, "ti0x4", ITab::OrderedIndex, 4); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[3])); + t->itabadd(4, x); + } + if (useindex(par, 5)) { + // a, c + ITab* x = new ITab(*t, "ti0z5", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(5, x); + } + if (useindex(par, 6)) { + // a, e + ITab* x = new ITab(*t, "ti0z6", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + t->itabadd(6, x); } tablist[0] = t; } // ti1 - simple char fields if (usetable(par, 1)) { - const Tab* t = new Tab("ti1", 5, 5, 1); + Tab* t = new Tab("ti1", 5, 7, 1); // name - pk - type - length - nullable - cs - 
t->m_col[0] = new Col(*t, 0, "a", 0, NdbDictionary::Column::Unsigned, 1, 1, 0); - t->m_col[1] = new Col(*t, 1, "b", 1, NdbDictionary::Column::Unsigned, 1, 0, 0); - t->m_col[2] = new Col(*t, 2, "c", 0, NdbDictionary::Column::Char, 20, 1, getcs(par)); - t->m_col[3] = new Col(*t, 3, "d", 0, NdbDictionary::Column::Char, 5, 1, getcs(par)); - t->m_col[4] = new Col(*t, 4, "e", 0, NdbDictionary::Column::Char, 5, 1, getcs(par)); + t->coladd(0, new Col(*t, 0, "a", 0, Col::Unsigned, 1, 0, 0)); + t->coladd(1, new Col(*t, 1, "b", 1, Col::Unsigned, 1, 0, 0)); + t->coladd(2, new Col(*t, 2, "c", 0, Col::Char, 20, 1, getcs(par))); + t->coladd(3, new Col(*t, 3, "d", 0, Col::Char, 5, 0, getcs(par))); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Char, 5, 1, getcs(par))); if (useindex(par, 0)) { // b - const ITab* x = t->m_itab[0] = new ITab(*t, "ti1x0", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); + ITab* x = new ITab(*t, "ti1x0", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); } if (useindex(par, 1)) { // a, c - const ITab* x = t->m_itab[1] = new ITab(*t, "ti1x1", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); + ITab* x = new ITab(*t, "ti1x1", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(1, x); } if (useindex(par, 2)) { // c, a - const ITab* x = t->m_itab[2] = new ITab(*t, "ti1x2", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[2]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[0]); + ITab* x = new ITab(*t, "ti1x2", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[2])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[0])); + t->itabadd(2, x); } if (useindex(par, 3)) { // e - const ITab* x = t->m_itab[3] = new ITab(*t, "ti1x3", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[4]); + ITab* x = new ITab(*t, "ti1x3", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); + t->itabadd(3, x); } if (useindex(par, 4)) { // e, d, c, b - const ITab* x = t->m_itab[4] = new ITab(*t, "ti1x4", 4); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[4]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[3]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[2]); - x->m_icol[3] = new ICol(*x, 3, *t->m_col[1]); + ITab* x = new ITab(*t, "ti1x4", ITab::OrderedIndex, 4); + x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[3])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[1])); + t->itabadd(4, x); + } + if (useindex(par, 5)) { + // a, b + ITab* x = new ITab(*t, "ti1z5", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); + t->itabadd(5, x); + } + if (useindex(par, 6)) { + // a, b, d + ITab* x = new ITab(*t, "ti1z6", ITab::UniqueHashIndex, 3); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); + t->itabadd(6, x); } tablist[1] = t; } // ti2 - complex char fields if (usetable(par, 2)) { - const Tab* t = new Tab("ti2", 5, 5, 2); + Tab* t = new Tab("ti2", 5, 7, 2); // name - pk - type - length - nullable - cs - t->m_col[0] = new Col(*t, 0, "a", 1, NdbDictionary::Column::Char, 101, 0, getcs(par)); - t->m_col[1] = new Col(*t, 1, "b", 0, NdbDictionary::Column::Char, 4, 1, getcs(par)); - t->m_col[2] = new Col(*t, 2, "c", 1, NdbDictionary::Column::Unsigned, 1, 0, 0); - t->m_col[3] = new Col(*t, 3, "d", 1, 
NdbDictionary::Column::Char, 3, 0, getcs(par)); - t->m_col[4] = new Col(*t, 4, "e", 0, NdbDictionary::Column::Char, 101, 0, getcs(par)); + t->coladd(0, new Col(*t, 0, "a", 1, Col::Char, 31, 0, getcs(par))); + t->coladd(1, new Col(*t, 1, "b", 0, Col::Char, 4, 1, getcs(par))); + t->coladd(2, new Col(*t, 2, "c", 1, Col::Unsigned, 1, 0, 0)); + t->coladd(3, new Col(*t, 3, "d", 1, Col::Char, 3, 0, getcs(par))); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Char, 17, 0, getcs(par))); if (useindex(par, 0)) { // a, c, d - const ITab* x = t->m_itab[0] = new ITab(*t, "ti2x0", 3); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[2]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[3]); + ITab* x = new ITab(*t, "ti2x0", ITab::OrderedIndex, 3); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); + t->itabadd(0, x); } if (useindex(par, 1)) { // e, d, c, b, a - const ITab* x = t->m_itab[1] = new ITab(*t, "ti2x1", 5); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[4]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[3]); - x->m_icol[2] = new ICol(*x, 2, *t->m_col[2]); - x->m_icol[3] = new ICol(*x, 3, *t->m_col[1]); - x->m_icol[4] = new ICol(*x, 4, *t->m_col[0]); + ITab* x = new ITab(*t, "ti2x1", ITab::OrderedIndex, 5); + x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[3])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[1])); + x->icoladd(4, new ICol(*x, 4, *t->m_col[0])); + t->itabadd(1, x); } if (useindex(par, 2)) { // d - const ITab* x = t->m_itab[2] = new ITab(*t, "ti2x2", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[3]); + ITab* x = new ITab(*t, "ti2x2", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[3])); + t->itabadd(2, x); } if (useindex(par, 3)) { // b - const ITab* x = t->m_itab[3] = new ITab(*t, "ti2x3", 1); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[1]); + ITab* x = new ITab(*t, "ti2x3", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + t->itabadd(3, x); } if (useindex(par, 4)) { // a, e - const ITab* x = t->m_itab[4] = new ITab(*t, "ti2x4", 2); - x->m_icol[0] = new ICol(*x, 0, *t->m_col[0]); - x->m_icol[1] = new ICol(*x, 1, *t->m_col[4]); + ITab* x = new ITab(*t, "ti2x4", ITab::OrderedIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + t->itabadd(4, x); + } + if (useindex(par, 5)) { + // a, c + ITab* x = new ITab(*t, "ti2z5", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + t->itabadd(5, x); + } + if (useindex(par, 6)) { + // a, c, d, e + ITab* x = new ITab(*t, "ti2z6", ITab::UniqueHashIndex, 4); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); + x->icoladd(3, new ICol(*x, 3, *t->m_col[4])); + t->itabadd(6, x); } tablist[2] = t; } @@ -944,6 +1094,7 @@ struct Con { NdbDictionary::Dictionary* m_dic; NdbConnection* m_tx; NdbOperation* m_op; + NdbIndexOperation* m_indexop; NdbScanOperation* m_scanop; NdbIndexScanOperation* m_indexscanop; enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive }; @@ -951,7 +1102,7 @@ struct Con { enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther }; ErrType m_errtype; Con() : - m_ndb(0), m_dic(0), m_tx(0), m_op(0), + m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0), m_scanop(0), m_indexscanop(0), 
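// The new openScanOrdered()/openScanOrderedExclusive() wrappers below differ
// from the plain scan opens only in the readTuples() call: the fourth
// argument requests rows in index order and the fifth reverses the order
// (shape of the call as used in this patch; CHKCON error handling omitted):
//
//   NdbOperation::LockMode lm =
//     exclusive ? NdbOperation::LM_Exclusive : NdbOperation::LM_Read;
//   m_indexscanop->readTuples(lm, scanbat, scanpar,
//                             true,         // return rows in index order
//                             descending);  // ... highest key first
//
// scanreadindex() later checks the result stream with Set::verifyorder().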
m_scanmode(ScanNo), m_errtype(ErrNone) {} ~Con() { if (m_tx != 0) @@ -962,6 +1113,7 @@ struct Con { void disconnect(); int startTransaction(); int getNdbOperation(const Tab& tab); + int getNdbIndexOperation(const ITab& itab, const Tab& tab); int getNdbScanOperation(const Tab& tab); int getNdbScanOperation(const ITab& itab, const Tab& tab); int equal(int num, const char* addr); @@ -972,6 +1124,8 @@ struct Con { int execute(ExecType t, bool& deadlock); int openScanRead(unsigned scanbat, unsigned scanpar); int openScanExclusive(unsigned scanbat, unsigned scanpar); + int openScanOrdered(unsigned scanbat, unsigned scanpar, bool descending); + int openScanOrderedExclusive(unsigned scanbat, unsigned scanpar, bool descending); int executeScan(); int nextScanResult(bool fetchAllowed); int nextScanResult(bool fetchAllowed, bool& deadlock); @@ -1025,6 +1179,14 @@ Con::getNdbOperation(const Tab& tab) return 0; } +int +Con::getNdbIndexOperation(const ITab& itab, const Tab& tab) +{ + assert(m_tx != 0); + CHKCON((m_op = m_indexop = m_tx->getNdbIndexOperation(itab.m_name, tab.m_name)) != 0, *this); + return 0; +} + int Con::getNdbScanOperation(const Tab& tab) { @@ -1115,6 +1277,25 @@ Con::openScanExclusive(unsigned scanbat, unsigned scanpar) return 0; } +int +Con::openScanOrdered(unsigned scanbat, unsigned scanpar, bool descending) +{ + assert(m_tx != 0 && m_indexscanop != 0); + NdbOperation::LockMode lm = NdbOperation::LM_Read; + CHKCON(m_indexscanop->readTuples(lm, scanbat, scanpar, true, descending) == 0, *this); + return 0; +} + +int +Con::openScanOrderedExclusive(unsigned scanbat, unsigned scanpar, bool descending) +{ + assert(m_tx != 0 && m_indexscanop != 0); + NdbOperation::LockMode lm = NdbOperation::LM_Exclusive; + CHKCON(m_indexscanop->readTuples(lm, scanbat, scanpar, true, descending) == 0, *this); + return 0; +} + + int Con::executeScan() { @@ -1202,6 +1383,7 @@ Con::printerror(NdbOut& out) if ((code = m_tx->getNdbError().code) != 0) { LL0(++any << " con: error " << m_tx->getNdbError()); die += (code == g_opt.m_die); + // 631 is new, occurs only on 4 db nodes, needs to be checked out if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631) m_errtype = ErrDeadlock; } @@ -1290,7 +1472,7 @@ createtable(Par par) for (unsigned k = 0; k < tab.m_cols; k++) { const Col& col = *tab.m_col[k]; NdbDictionary::Column c(col.m_name); - c.setType(col.m_type); + c.setType((NdbDictionary::Column::Type)col.m_type); c.setLength(col.m_bytelength); // NDB API uses length in bytes c.setPrimaryKey(col.m_pk); c.setNullable(col.m_nullable); @@ -1343,8 +1525,10 @@ createindex(Par par, const ITab& itab) LL4(itab); NdbDictionary::Index x(itab.m_name); x.setTable(tab.m_name); - x.setType(NdbDictionary::Index::OrderedIndex); - x.setLogging(false); + x.setType((NdbDictionary::Index::Type)itab.m_type); + if (par.m_nologging || itab.m_type == ITab::OrderedIndex) { + x.setLogging(false); + } for (unsigned k = 0; k < itab.m_icols; k++) { const ICol& icol = *itab.m_icol[k]; const Col& col = icol.m_col; @@ -1385,6 +1569,8 @@ struct Val { void copy(const void* addr); const void* dataaddr() const; bool m_null; + int equal(Par par) const; + int equal(Par par, const ICol& icol) const; int setval(Par par) const; void calc(Par par, unsigned i); void calckey(Par par, unsigned i); @@ -1402,9 +1588,9 @@ Val::Val(const Col& col) : m_col(col) { switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: break; - case NdbDictionary::Column::Char: + case Col::Char: m_char = new 
unsigned char [col.m_bytelength]; break; default: @@ -1417,9 +1603,9 @@ Val::~Val() { const Col& col = m_col; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: break; - case NdbDictionary::Column::Char: + case Col::Char: delete [] m_char; break; default: @@ -1446,10 +1632,10 @@ Val::copy(const void* addr) { const Col& col = m_col; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: m_uint32 = *(const Uint32*)addr; break; - case NdbDictionary::Column::Char: + case Col::Char: memcpy(m_char, addr, col.m_bytelength); break; default: @@ -1464,9 +1650,9 @@ Val::dataaddr() const { const Col& col = m_col; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: return &m_uint32; - case NdbDictionary::Column::Char: + case Col::Char: return m_char; default: break; @@ -1475,19 +1661,38 @@ Val::dataaddr() const return 0; } +int +Val::equal(Par par) const +{ + Con& con = par.con(); + const Col& col = m_col; + assert(col.m_pk && ! m_null); + const char* addr = (const char*)dataaddr(); + LL5("equal [" << col << "] " << *this); + CHK(con.equal(col.m_num, addr) == 0); + return 0; +} + +int +Val::equal(Par par, const ICol& icol) const +{ + Con& con = par.con(); + assert(! m_null); + const char* addr = (const char*)dataaddr(); + LL5("equal [" << icol << "] " << *this); + CHK(con.equal(icol.m_num, addr) == 0); + return 0; +} + int Val::setval(Par par) const { Con& con = par.con(); const Col& col = m_col; - const char* addr = (const char*)dataaddr(); - if (m_null) - addr = 0; - LL5("setval [" << m_col << "] " << *this); - if (col.m_pk) - CHK(con.equal(col.m_num, addr) == 0); - else - CHK(con.setValue(col.m_num, addr) == 0); + assert(! col.m_pk); + const char* addr = ! m_null ? (const char*)dataaddr() : 0; + LL5("setval [" << col << "] " << *this); + CHK(con.setValue(col.m_num, addr) == 0); return 0; } @@ -1506,10 +1711,10 @@ Val::calckey(Par par, unsigned i) const Col& col = m_col; m_null = false; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: m_uint32 = i; break; - case NdbDictionary::Column::Char: + case Col::Char: { const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; @@ -1549,10 +1754,10 @@ Val::calcnokey(Par par) } unsigned v = par.m_range + r; switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: m_uint32 = v; break; - case NdbDictionary::Column::Char: + case Col::Char: { const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; @@ -1609,7 +1814,7 @@ Val::cmp(const Val& val2) const col.verify(val2.dataaddr()); // compare switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: { if (m_uint32 < val2.m_uint32) return -1; @@ -1618,7 +1823,7 @@ Val::cmp(const Val& val2) const return 0; } break; - case NdbDictionary::Column::Char: + case Col::Char: { const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; @@ -1657,10 +1862,10 @@ operator<<(NdbOut& out, const Val& val) return out; } switch (col.m_type) { - case NdbDictionary::Column::Unsigned: + case Col::Unsigned: out << val.m_uint32; break; - case NdbDictionary::Column::Char: + case Col::Char: { char buf[4 * 8000]; char *p = buf; @@ -1697,19 +1902,25 @@ struct Row { const Tab& m_tab; Val** m_val; bool m_exist; - enum Op { NoOp = 0, ReadOp, InsOp, UpdOp, DelOp }; + enum Op { NoOp = 0, ReadOp = 1, InsOp = 2, UpdOp = 4, DelOp = 8, AnyOp = 15 }; Op m_pending; + Row* m_dbrow; // copy of db row before update Row(const Tab& tab); ~Row(); void copy(const Row& row2); - void 
calc(Par par, unsigned i); + void calc(Par par, unsigned i, unsigned mask = 0); + const Row& dbrow() const; int verify(const Row& row2) const; int insrow(Par par); int updrow(Par par); + int updrow(Par par, const ITab& itab); int delrow(Par par); + int delrow(Par par, const ITab& itab); int selrow(Par par); + int selrow(Par par, const ITab& itab); int setrow(Par par); int cmp(const Row& row2) const; + int cmp(const Row& row2, const ITab& itab) const; private: Row& operator=(const Row& row2); }; @@ -1724,6 +1935,7 @@ Row::Row(const Tab& tab) : } m_exist = false; m_pending = NoOp; + m_dbrow = 0; } Row::~Row() @@ -1733,6 +1945,7 @@ Row::~Row() delete m_val[k]; } delete [] m_val; + delete m_dbrow; } void @@ -1745,27 +1958,49 @@ Row::copy(const Row& row2) const Val& val2 = *row2.m_val[k]; val.copy(val2); } + m_exist = row2.m_exist; + m_pending = row2.m_pending; + if (row2.m_dbrow == 0) { + m_dbrow = 0; + } else { + assert(row2.m_dbrow->m_dbrow == 0); + if (m_dbrow == 0) + m_dbrow = new Row(tab); + m_dbrow->copy(*row2.m_dbrow); + } } void -Row::calc(Par par, unsigned i) +Row::calc(Par par, unsigned i, unsigned mask) { const Tab& tab = m_tab; for (unsigned k = 0; k < tab.m_cols; k++) { - Val& val = *m_val[k]; - val.calc(par, i); + if (! (mask & (1 << k))) { + Val& val = *m_val[k]; + val.calc(par, i); + } } } +const Row& +Row::dbrow() const +{ + if (m_dbrow == 0) + return *this; + assert(m_pending == Row::UpdOp || m_pending == Row::DelOp); + return *m_dbrow; +} + int Row::verify(const Row& row2) const { const Tab& tab = m_tab; - assert(&tab == &row2.m_tab && m_exist && row2.m_exist); + const Row& row1 = *this; + assert(&row1.m_tab == &row2.m_tab && row1.m_exist && row2.m_exist); for (unsigned k = 0; k < tab.m_cols; k++) { - const Val& val = *m_val[k]; + const Val& val1 = *row1.m_val[k]; const Val& val2 = *row2.m_val[k]; - CHK(val.verify(val2) == 0); + CHK(val1.verify(val2) == 0); } return 0; } @@ -1780,7 +2015,15 @@ Row::insrow(Par par) CHKCON(con.m_op->insertTuple() == 0, con); for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; - CHK(val.setval(par) == 0); + const Col& col = val.m_col; + if (col.m_pk) + CHK(val.equal(par) == 0); + } + for (unsigned k = 0; k < tab.m_cols; k++) { + const Val& val = *m_val[k]; + const Col& col = val.m_col; + if (! col.m_pk) + CHK(val.setval(par) == 0); } m_pending = InsOp; return 0; @@ -1797,16 +2040,40 @@ Row::updrow(Par par) for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; const Col& col = val.m_col; - if (! col.m_pk) - continue; - CHK(val.setval(par) == 0); + if (col.m_pk) + CHK(val.equal(par) == 0); } for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; const Col& col = val.m_col; - if (col.m_pk) - continue; - CHK(val.setval(par) == 0); + if (! col.m_pk) + CHK(val.setval(par) == 0); + } + m_pending = UpdOp; + return 0; +} + +int +Row::updrow(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = m_tab; + assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab); + assert(m_exist); + CHK(con.getNdbIndexOperation(itab, tab) == 0); + CHKCON(con.m_op->updateTuple() == 0, con); + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + const Val& val = *m_val[m]; + CHK(val.equal(par, icol) == 0); + } + for (unsigned k = 0; k < tab.m_cols; k++) { + const Val& val = *m_val[k]; + const Col& col = val.m_col; + if (! 
col.m_pk) + CHK(val.setval(par) == 0); } m_pending = UpdOp; return 0; @@ -1824,7 +2091,27 @@ Row::delrow(Par par) const Val& val = *m_val[k]; const Col& col = val.m_col; if (col.m_pk) - CHK(val.setval(par) == 0); + CHK(val.equal(par) == 0); + } + m_pending = DelOp; + return 0; +} + +int +Row::delrow(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = m_tab; + assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab); + assert(m_exist); + CHK(con.getNdbIndexOperation(itab, tab) == 0); + CHKCON(con.m_op->deleteTuple() == 0, con); + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + const Val& val = *m_val[m]; + CHK(val.equal(par, icol) == 0); } m_pending = DelOp; return 0; @@ -1841,7 +2128,25 @@ Row::selrow(Par par) const Val& val = *m_val[k]; const Col& col = val.m_col; if (col.m_pk) - CHK(val.setval(par) == 0); + CHK(val.equal(par) == 0); + } + return 0; +} + +int +Row::selrow(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = m_tab; + assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab); + CHK(con.getNdbIndexOperation(itab, tab) == 0); + CHKCON(con.m_op->readTuple() == 0, con); + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + const Val& val = *m_val[m]; + CHK(val.equal(par, icol) == 0); } return 0; } @@ -1876,6 +2181,40 @@ Row::cmp(const Row& row2) const return c; } +int +Row::cmp(const Row& row2, const ITab& itab) const +{ + const Tab& tab = m_tab; + int c = 0; + for (unsigned i = 0; i < itab.m_icols; i++) { + const ICol& icol = *itab.m_icol[i]; + const Col& col = icol.m_col; + unsigned k = col.m_num; + assert(k < tab.m_cols); + const Val& val = *m_val[k]; + const Val& val2 = *row2.m_val[k]; + if ((c = val.cmp(val2)) != 0) + break; + } + return c; +} + +static NdbOut& +operator<<(NdbOut& out, const Row::Op op) +{ + if (op == Row::NoOp) + out << "NoOp"; + else if (op == Row::InsOp) + out << "InsOp"; + else if (op == Row::UpdOp) + out << "UpdOp"; + else if (op == Row::DelOp) + out << "DelOp"; + else + out << op; + return out; +} + static NdbOut& operator<<(NdbOut& out, const Row& row) { @@ -1885,10 +2224,21 @@ operator<<(NdbOut& out, const Row& row) out << " "; out << *row.m_val[i]; } - out << " [exist=" << row.m_exist; + out << " exist=" << row.m_exist; if (row.m_pending) out << " pending=" << row.m_pending; - out << "]"; + if (row.m_dbrow != 0) + out << " [dbrow=" << *row.m_dbrow << "]"; + return out; +} + +static NdbOut& +operator<<(NdbOut& out, const Row* rowptr) +{ + if (rowptr == 0) + out << "null"; + else + out << *rowptr; return out; } @@ -1898,38 +2248,47 @@ struct Set { const Tab& m_tab; unsigned m_rows; Row** m_row; - Row** m_saverow; + unsigned* m_rowkey; // maps row number (from 0) in scan to tuple key Row* m_keyrow; NdbRecAttr** m_rec; Set(const Tab& tab, unsigned rows); ~Set(); void reset(); unsigned count() const; - // row methods + // old and new values bool exist(unsigned i) const; - Row::Op pending(unsigned i) const; + void dbsave(unsigned i); + void calc(Par par, unsigned i, unsigned mask = 0); + bool pending(unsigned i, unsigned mask) const; void notpending(unsigned i); void notpending(const Lst& lst); - void calc(Par par, unsigned i); + void dbdiscard(unsigned i); + void dbdiscard(const Lst& lst); + const Row& dbrow(unsigned i) const; + // operations int insrow(Par par, unsigned i); int updrow(Par par, unsigned i); + 
int updrow(Par par, const ITab& itab, unsigned i); int delrow(Par par, unsigned i); - int selrow(Par par, unsigned i); + int delrow(Par par, const ITab& itab, unsigned i); + int selrow(Par par, const Row& keyrow); + int selrow(Par par, const ITab& itab, const Row& keyrow); + // set and get + void setkey(Par par, const Row& keyrow); + void setkey(Par par, const ITab& itab, const Row& keyrow); int setrow(Par par, unsigned i); int getval(Par par); int getkey(Par par, unsigned* i); - int putval(unsigned i, bool force); - // set methods + int putval(unsigned i, bool force, unsigned n = ~0); + // verify int verify(const Set& set2) const; - void savepoint(); - void commit(); - void rollback(); + int verifyorder(const ITab& itab, bool descending) const; // protect structure NdbMutex* m_mutex; - void lock() { + void lock() const { NdbMutex_Lock(m_mutex); } - void unlock() { + void unlock() const { NdbMutex_Unlock(m_mutex); } private: @@ -1945,7 +2304,11 @@ Set::Set(const Tab& tab, unsigned rows) : // allocate on need to save space m_row[i] = 0; } - m_saverow = 0; + m_rowkey = new unsigned [m_rows]; + for (unsigned n = 0; n < m_rows; n++) { + // initialize to null + m_rowkey[n] = ~0; + } m_keyrow = new Row(tab); m_rec = new NdbRecAttr* [tab.m_cols]; for (unsigned k = 0; k < tab.m_cols; k++) { @@ -1959,11 +2322,9 @@ Set::~Set() { for (unsigned i = 0; i < m_rows; i++) { delete m_row[i]; - if (m_saverow != 0) - delete m_saverow[i]; } delete [] m_row; - delete [] m_saverow; + delete [] m_rowkey; delete m_keyrow; delete [] m_rec; NdbMutex_Destroy(m_mutex); @@ -1994,6 +2355,8 @@ Set::count() const return count; } +// old and new values + bool Set::exist(unsigned i) const { @@ -2003,13 +2366,37 @@ Set::exist(unsigned i) const return m_row[i]->m_exist; } -Row::Op -Set::pending(unsigned i) const +void +Set::dbsave(unsigned i) +{ + const Tab& tab = m_tab; + assert(i < m_rows && m_row[i] != 0); + Row& row = *m_row[i]; + LL5("dbsave " << i << ": " << row); + assert(row.m_exist && ! 
row.m_pending && row.m_dbrow == 0); + // could swap pointers but making copy is safer + Row* rowptr = new Row(tab); + rowptr->copy(row); + row.m_dbrow = rowptr; +} + +void +Set::calc(Par par, unsigned i, unsigned mask) +{ + const Tab& tab = m_tab; + if (m_row[i] == 0) + m_row[i] = new Row(tab); + Row& row = *m_row[i]; + row.calc(par, i, mask); +} + +bool +Set::pending(unsigned i, unsigned mask) const { assert(i < m_rows); if (m_row[i] == 0) // not allocated => not pending return Row::NoOp; - return m_row[i]->m_pending; + return m_row[i]->m_pending & mask; } void @@ -2017,10 +2404,13 @@ Set::notpending(unsigned i) { assert(m_row[i] != 0); Row& row = *m_row[i]; - if (row.m_pending == Row::InsOp) + if (row.m_pending == Row::InsOp) { row.m_exist = true; - if (row.m_pending == Row::DelOp) + } else if (row.m_pending == Row::UpdOp) { + ; + } else if (row.m_pending == Row::DelOp) { row.m_exist = false; + } row.m_pending = Row::NoOp; } @@ -2034,15 +2424,35 @@ Set::notpending(const Lst& lst) } void -Set::calc(Par par, unsigned i) +Set::dbdiscard(unsigned i) { - const Tab& tab = m_tab; - if (m_row[i] == 0) - m_row[i] = new Row(tab); + assert(m_row[i] != 0); Row& row = *m_row[i]; - row.calc(par, i); + LL5("dbdiscard " << i << ": " << row); + assert(row.m_dbrow != 0); + delete row.m_dbrow; + row.m_dbrow = 0; } +const Row& +Set::dbrow(unsigned i) const +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + return row.dbrow(); +} + +void +Set::dbdiscard(const Lst& lst) +{ + for (unsigned j = 0; j < lst.m_cnt; j++) { + unsigned i = lst.m_arr[j]; + dbdiscard(i); + } +} + +// operations + int Set::insrow(Par par, unsigned i) { @@ -2061,6 +2471,15 @@ Set::updrow(Par par, unsigned i) return 0; } +int +Set::updrow(Par par, const ITab& itab, unsigned i) +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + CHK(row.updrow(par, itab) == 0); + return 0; +} + int Set::delrow(Par par, unsigned i) { @@ -2071,15 +2490,67 @@ Set::delrow(Par par, unsigned i) } int -Set::selrow(Par par, unsigned i) +Set::delrow(Par par, const ITab& itab, unsigned i) +{ + assert(m_row[i] != 0); + Row& row = *m_row[i]; + CHK(row.delrow(par, itab) == 0); + return 0; +} + +int +Set::selrow(Par par, const Row& keyrow) { Con& con = par.con(); - m_keyrow->calc(par, i); + const Tab& tab = par.tab(); + setkey(par, keyrow); + LL5("selrow " << tab.m_name << ": keyrow: " << keyrow); CHK(m_keyrow->selrow(par) == 0); CHK(getval(par) == 0); return 0; } +int +Set::selrow(Par par, const ITab& itab, const Row& keyrow) +{ + Con& con = par.con(); + setkey(par, itab, keyrow); + LL5("selrow " << itab.m_name << ": keyrow: " << keyrow); + CHK(m_keyrow->selrow(par, itab) == 0); + CHK(getval(par) == 0); + return 0; +} + +// set and get + +void +Set::setkey(Par par, const Row& keyrow) +{ + const Tab& tab = m_tab; + for (unsigned k = 0; k < tab.m_cols; k++) { + const Col& col = *tab.m_col[k]; + if (col.m_pk) { + Val& val1 = *m_keyrow->m_val[k]; + const Val& val2 = *keyrow.dbrow().m_val[k]; + val1.copy(val2); + } + } +} + +void +Set::setkey(Par par, const ITab& itab, const Row& keyrow) +{ + const Tab& tab = m_tab; + for (unsigned k = 0; k < itab.m_icols; k++) { + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + unsigned m = col.m_num; + Val& val1 = *m_keyrow->m_val[m]; + const Val& val2 = *keyrow.dbrow().m_val[m]; + val1.copy(val2); + } +} + int Set::setrow(Par par, unsigned i) { @@ -2114,7 +2585,7 @@ Set::getkey(Par par, unsigned* i) } int -Set::putval(unsigned i, bool force) +Set::putval(unsigned i, bool force, unsigned n) { const Tab& tab = 
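// dbsave()/dbdiscard() above bracket every update or delete with a snapshot
// of the committed row, so verification can still compare against the old
// image while the operation is pending. Simplified lifecycle model
// (hypothetical: a single int payload instead of Val objects):
struct row_model {
  int value;            // stands in for the column values
  row_model* dbrow;     // committed image while an op is pending, else 0
  void dbsave()         // before changing: copy the committed values
  { dbrow = new row_model(*this); dbrow->dbrow = 0; }
  void dbdiscard()      // after commit: the new values are authoritative
  { delete dbrow; dbrow = 0; }
  const row_model& committed() const   // what Row::dbrow() returns
  { return dbrow ? *dbrow : *this; }
};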
m_tab; if (m_row[i] == 0) @@ -2135,55 +2606,55 @@ Set::putval(unsigned i, bool force) } if (! row.m_exist) row.m_exist = true; + if (n != ~0) + m_rowkey[n] = i; return 0; } +// verify + int Set::verify(const Set& set2) const { - const Tab& tab = m_tab; - assert(&tab == &set2.m_tab && m_rows == set2.m_rows); - LL3("verify set1 count=" << count() << " vs set2 count=" << set2.count()); + assert(&m_tab == &set2.m_tab && m_rows == set2.m_rows); + LL4("verify set1 count=" << count() << " vs set2 count=" << set2.count()); for (unsigned i = 0; i < m_rows; i++) { - CHK(exist(i) == set2.exist(i)); - if (! exist(i)) - continue; - Row& row = *m_row[i]; - Row& row2 = *set2.m_row[i]; - CHK(row.verify(row2) == 0); + bool ok = true; + if (exist(i) != set2.exist(i)) { + ok = false; + } else if (exist(i)) { + if (dbrow(i).verify(set2.dbrow(i)) != 0) + ok = false; + } + if (! ok) { + LL1("verify failed: key=" << i << " row1=" << m_row[i] << " row2=" << set2.m_row[i]); + CHK(0 == 1); + } } return 0; } -void -Set::savepoint() +int +Set::verifyorder(const ITab& itab, bool descending) const { const Tab& tab = m_tab; - assert(m_saverow == 0); - m_saverow = new Row* [m_rows]; - for (unsigned i = 0; i < m_rows; i++) { - if (m_row[i] == 0) - m_saverow[i] = 0; - else { - m_saverow[i] = new Row(tab); - m_saverow[i]->copy(*m_row[i]); - } + for (unsigned n = 0; n < m_rows; n++) { + unsigned i2 = m_rowkey[n]; + if (i2 == ~0) + break; + if (n == 0) + continue; + unsigned i1 = m_rowkey[n - 1]; + assert(i1 < m_rows && i2 < m_rows); + const Row& row1 = *m_row[i1]; + const Row& row2 = *m_row[i2]; + assert(row1.m_exist && row2.m_exist); + if (! descending) + CHK(row1.cmp(row2, itab) <= 0); + else + CHK(row1.cmp(row2, itab) >= 0); } -} - -void -Set::commit() -{ - delete [] m_saverow; - m_saverow = 0; -} - -void -Set::rollback() -{ - assert(m_saverow != 0); - m_row = m_saverow; - m_saverow = 0; + return 0; } static NdbOut& @@ -2384,7 +2855,9 @@ BSet::filter(const Set& set, Set& set2) const for (unsigned i = 0; i < set.m_rows; i++) { if (! set.exist(i)) continue; - const Row& row = *set.m_row[i]; + set.lock(); + const Row& row = set.dbrow(i); + set.unlock(); if (! g_store_null_key) { bool ok1 = false; for (unsigned k = 0; k < itab.m_icols; k++) { @@ -2430,7 +2903,6 @@ BSet::filter(const Set& set, Set& set2) const Row& row2 = *set2.m_row[i]; assert(! row2.m_exist); row2.copy(row); - row2.m_exist = true; } } @@ -2451,15 +2923,16 @@ static int pkinsert(Par par) { Con& con = par.con(); + const Tab& tab = par.tab(); Set& set = par.set(); - LL3("pkinsert"); + LL3("pkinsert " << tab.m_name); CHK(con.startTransaction() == 0); Lst lst; for (unsigned j = 0; j < par.m_rows; j++) { unsigned j2 = ! par.m_randomkey ? 
j : urandom(par.m_rows); unsigned i = thrrow(par, j2); set.lock(); - if (set.exist(i) || set.pending(i)) { + if (set.exist(i) || set.pending(i, Row::AnyOp)) { set.unlock(); continue; } @@ -2473,7 +2946,7 @@ pkinsert(Par par) CHK(con.execute(Commit, deadlock) == 0); con.closeTransaction(); if (deadlock) { - LL1("pkinsert: stop on deadlock"); + LL1("pkinsert: stop on deadlock [at 1]"); return 0; } set.lock(); @@ -2488,7 +2961,7 @@ pkinsert(Par par) CHK(con.execute(Commit, deadlock) == 0); con.closeTransaction(); if (deadlock) { - LL1("pkinsert: stop on deadlock"); + LL1("pkinsert: stop on deadlock [at 2]"); return 0; } set.lock(); @@ -2504,8 +2977,9 @@ static int pkupdate(Par par) { Con& con = par.con(); + const Tab& tab = par.tab(); Set& set = par.set(); - LL3("pkupdate"); + LL3("pkupdate " << tab.m_name); CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; @@ -2513,10 +2987,11 @@ pkupdate(Par par) unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { set.unlock(); continue; } + set.dbsave(i); set.calc(par, i); CHK(set.updrow(par, i) == 0); set.unlock(); @@ -2526,12 +3001,13 @@ pkupdate(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkupdate: stop on deadlock"); + LL1("pkupdate: stop on deadlock [at 1]"); break; } con.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); lst.reset(); CHK(con.startTransaction() == 0); @@ -2541,10 +3017,11 @@ pkupdate(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkupdate: stop on deadlock"); + LL1("pkupdate: stop on deadlock [at 1]"); } else { set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); } } @@ -2556,8 +3033,9 @@ static int pkdelete(Par par) { Con& con = par.con(); + const Tab& tab = par.tab(); Set& set = par.set(); - LL3("pkdelete"); + LL3("pkdelete " << tab.m_name); CHK(con.startTransaction() == 0); Lst lst; bool deadlock = false; @@ -2565,7 +3043,7 @@ pkdelete(Par par) unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); unsigned i = thrrow(par, j2); set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { set.unlock(); continue; } @@ -2577,7 +3055,7 @@ pkdelete(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkdelete: stop on deadlock"); + LL1("pkdelete: stop on deadlock [at 1]"); break; } con.closeTransaction(); @@ -2592,7 +3070,7 @@ pkdelete(Par par) deadlock = par.m_deadlock; CHK(con.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("pkdelete: stop on deadlock"); + LL1("pkdelete: stop on deadlock [at 2]"); } else { set.lock(); set.notpending(lst); @@ -2609,19 +3087,19 @@ pkread(Par par) Con& con = par.con(); const Tab& tab = par.tab(); Set& set = par.set(); - LL3((par.m_verify ? "pkverify " : "pkread ") << tab.m_name); + LL3("pkread " << tab.m_name << " verify=" << par.m_verify); // expected const Set& set1 = set; Set set2(tab, set.m_rows); for (unsigned i = 0; i < set.m_rows; i++) { set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! 
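// The per-row loops in pkinsert()/pkupdate()/pkdelete() around this hunk all
// share one batching pattern: queue operations, commit every m_batch rows,
// and treat a deadlock as a quiet stop rather than a test failure. Reduced
// to a skeleton with stubbed-out harness calls (hypothetical, not the real
// Con/Set API):
#include <vector>
static bool commit_hit_deadlock() { return false; }   // stub: execute(Commit)
static bool row_busy(unsigned)    { return false; }   // stub: exist/pending test

static int batched_ops(unsigned rows, unsigned batch)
{
  std::vector<unsigned> lst;                 // keys with operations in flight
  for (unsigned i = 0; i < rows; i++) {
    if (row_busy(i))
      continue;                              // already exists or pending: skip
    lst.push_back(i);                        // the ins/upd/del op is queued here
    if (lst.size() == batch) {
      if (commit_hit_deadlock())
        return 0;                            // "stop on deadlock": not a failure
      lst.clear();                           // notpending(lst): ops are durable
    }
  }
  if (!lst.empty() && commit_hit_deadlock())
    return 0;                                // same handling for the final batch
  return 0;
}
int main() { return batched_ops(32, 8); }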
set.exist(i)) { set.unlock(); continue; } set.unlock(); CHK(con.startTransaction() == 0); - CHK(set2.selrow(par, i) == 0); + CHK(set2.selrow(par, *set1.m_row[i]) == 0); CHK(con.execute(Commit) == 0); unsigned i2 = (unsigned)-1; CHK(set2.getkey(par, &i2) == 0 && i == i2); @@ -2659,6 +3137,146 @@ pkreadfast(Par par, unsigned count) return 0; } +// hash index operations + +static int +hashindexupdate(Par par, const ITab& itab) +{ + Con& con = par.con(); + Set& set = par.set(); + LL3("hashindexupdate " << itab.m_name); + CHK(con.startTransaction() == 0); + Lst lst; + bool deadlock = false; + for (unsigned j = 0; j < par.m_rows; j++) { + unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); + unsigned i = thrrow(par, j2); + set.lock(); + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { + set.unlock(); + continue; + } + set.dbsave(i); + // index key columns are not re-calculated + set.calc(par, i, itab.m_colmask); + CHK(set.updrow(par, itab, i) == 0); + set.unlock(); + LL4("hashindexupdate " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexupdate: stop on deadlock [at 1]"); + break; + } + con.closeTransaction(); + set.lock(); + set.notpending(lst); + set.dbdiscard(lst); + set.unlock(); + lst.reset(); + CHK(con.startTransaction() == 0); + } + } + if (! deadlock && lst.cnt() != 0) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexupdate: stop on deadlock [at 1]"); + } else { + set.lock(); + set.notpending(lst); + set.dbdiscard(lst); + set.unlock(); + } + } + con.closeTransaction(); + return 0; +}; + +static int +hashindexdelete(Par par, const ITab& itab) +{ + Con& con = par.con(); + Set& set = par.set(); + LL3("hashindexdelete " << itab.m_name); + CHK(con.startTransaction() == 0); + Lst lst; + bool deadlock = false; + for (unsigned j = 0; j < par.m_rows; j++) { + unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows); + unsigned i = thrrow(par, j2); + set.lock(); + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { + set.unlock(); + continue; + } + CHK(set.delrow(par, itab, i) == 0); + set.unlock(); + LL4("hashindexdelete " << i << ": " << *set.m_row[i]); + lst.push(i); + if (lst.cnt() == par.m_batch) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexdelete: stop on deadlock [at 1]"); + break; + } + con.closeTransaction(); + set.lock(); + set.notpending(lst); + set.unlock(); + lst.reset(); + CHK(con.startTransaction() == 0); + } + } + if (! deadlock && lst.cnt() != 0) { + deadlock = par.m_deadlock; + CHK(con.execute(Commit, deadlock) == 0); + if (deadlock) { + LL1("hashindexdelete: stop on deadlock [at 2]"); + } else { + set.lock(); + set.notpending(lst); + set.unlock(); + } + } + con.closeTransaction(); + return 0; +}; + +static int +hashindexread(Par par, const ITab& itab) +{ + Con& con = par.con(); + const Tab& tab = par.tab(); + Set& set = par.set(); + LL3("hashindexread " << itab.m_name << " verify=" << par.m_verify); + // expected + const Set& set1 = set; + Set set2(tab, set.m_rows); + for (unsigned i = 0; i < set.m_rows; i++) { + set.lock(); + if (! 
set.exist(i)) { + set.unlock(); + continue; + } + set.unlock(); + CHK(con.startTransaction() == 0); + CHK(set2.selrow(par, itab, *set1.m_row[i]) == 0); + CHK(con.execute(Commit) == 0); + unsigned i2 = (unsigned)-1; + CHK(set2.getkey(par, &i2) == 0 && i == i2); + CHK(set2.putval(i, false) == 0); + LL4("row " << set2.count() << ": " << *set2.m_row[i]); + con.closeTransaction(); + } + if (par.m_verify) + CHK(set1.verify(set2) == 0); + return 0; +} + // scan read static int @@ -2691,14 +3309,14 @@ scanreadtable(Par par) } unsigned i = (unsigned)-1; CHK(set2.getkey(par, &i) == 0); - CHK(set2.putval(i, false) == 0); + CHK(set2.putval(i, false, n) == 0); LL4("row " << n << ": " << *set2.m_row[i]); n++; } con.closeTransaction(); if (par.m_verify) CHK(set1.verify(set2) == 0); - LL3("scanread " << tab.m_name << " rows=" << n); + LL3("scanread " << tab.m_name << " done rows=" << n); return 0; } @@ -2745,19 +3363,22 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) // prefer proper subset if (0 < n && n < set.m_rows) break; - if (urandom(5) == 0) + if (urandom(3) == 0) break; set1.reset(); } } else { bset.filter(set, set1); } - LL3("scanread " << itab.m_name << " bounds=" << bset.m_bvals << " verify=" << par.m_verify); + LL3("scanread " << itab.m_name << " bounds=" << bset << " verify=" << par.m_verify << " ordered=" << par.m_ordered << " descending=" << par.m_descending); LL4("expect " << set1.count() << " rows"); Set set2(tab, set.m_rows); CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(itab, tab) == 0); - CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + if (! par.m_ordered) + CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + else + CHK(con.openScanOrdered(par.m_scanbat, par.m_scanpar, par.m_descending) == 0); CHK(bset.setbnd(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); @@ -2775,15 +3396,17 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) } unsigned i = (unsigned)-1; CHK(set2.getkey(par, &i) == 0); - LL4("key " << i); - CHK(set2.putval(i, par.m_dups) == 0); - LL4("row " << n << ": " << *set2.m_row[i]); + CHK(set2.putval(i, par.m_dups, n) == 0); + LL4("key " << i << " row " << n << ": " << *set2.m_row[i]); n++; } con.closeTransaction(); - if (par.m_verify) + if (par.m_verify) { CHK(set1.verify(set2) == 0); - LL3("scanread " << itab.m_name << " rows=" << n); + if (par.m_ordered) + CHK(set2.verifyorder(itab, par.m_descending) == 0); + } + LL3("scanread " << itab.m_name << " done rows=" << n); return 0; } @@ -2821,8 +3444,10 @@ scanreadindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); for (unsigned i = 0; i < par.m_subsubloop; i++) { - BSet bset(tab, itab, par.m_rows); - CHK(scanreadindex(par, itab, bset, true) == 0); + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + CHK(scanreadindex(par, itab, bset, true) == 0); + } } return 0; } @@ -2835,7 +3460,11 @@ scanreadindex(Par par) if (tab.m_itab[i] == 0) continue; const ITab& itab = *tab.m_itab[i]; - CHK(scanreadindex(par, itab) == 0); + if (itab.m_type == ITab::OrderedIndex) { + CHK(scanreadindex(par, itab) == 0); + } else { + CHK(hashindexread(par, itab) == 0); + } } return 0; } @@ -2932,7 +3561,7 @@ scanupdatetable(Par par) if (ret == 1) break; if (deadlock) { - LL1("scanupdatetable: stop on deadlock"); + LL1("scanupdatetable: stop on deadlock [at 1]"); break; } if (par.m_scanstop != 0 && urandom(par.m_scanstop) == 0) { @@ -2944,13 +3573,14 @@ scanupdatetable(Par par) CHK(set2.getkey(par, &i) == 0); const Row& row = 
*set.m_row[i]; set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! set.exist(i) || set.pending(i, Row::AnyOp)) { LL4("scan update " << tab.m_name << ": skip: " << row); } else { CHKTRY(set2.putval(i, false) == 0, set.unlock()); CHKTRY(con.updateScanTuple(con2) == 0, set.unlock()); Par par2 = par; par2.m_con = &con2; + set.dbsave(i); set.calc(par, i); CHKTRY(set.setrow(par2, i) == 0, set.unlock()); LL4("scan update " << tab.m_name << ": " << row); @@ -2961,12 +3591,13 @@ scanupdatetable(Par par) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdatetable: stop on deadlock [at 2]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -2977,12 +3608,13 @@ scanupdatetable(Par par) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdatetable: stop on deadlock [at 3]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -3009,7 +3641,10 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) Set set2(tab, set.m_rows); CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(itab, tab) == 0); - CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0); + if (! par.m_ordered) + CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0); + else + CHK(con.openScanOrderedExclusive(par.m_scanbat, par.m_scanpar, par.m_descending) == 0); CHK(bset.setbnd(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); @@ -3027,7 +3662,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) if (ret == 1) break; if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdateindex: stop on deadlock [at 1]"); break; } if (par.m_scanstop != 0 && urandom(par.m_scanstop) == 0) { @@ -3039,13 +3674,14 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) CHK(set2.getkey(par, &i) == 0); const Row& row = *set.m_row[i]; set.lock(); - if (! set.exist(i) || set.pending(i)) { + if (! 
set.exist(i) || set.pending(i, Row::AnyOp)) { LL4("scan update " << itab.m_name << ": skip: " << row); } else { CHKTRY(set2.putval(i, par.m_dups) == 0, set.unlock()); CHKTRY(con.updateScanTuple(con2) == 0, set.unlock()); Par par2 = par; par2.m_con = &con2; + set.dbsave(i); set.calc(par, i); CHKTRY(set.setrow(par2, i) == 0, set.unlock()); LL4("scan update " << itab.m_name << ": " << row); @@ -3056,12 +3692,13 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdateindex: stop on deadlock [at 2]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -3072,12 +3709,13 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) deadlock = par.m_deadlock; CHK(con2.execute(Commit, deadlock) == 0); if (deadlock) { - LL1("scanupdateindex: stop on deadlock"); + LL1("scanupdateindex: stop on deadlock [at 3]"); goto out; } con2.closeTransaction(); set.lock(); set.notpending(lst); + set.dbdiscard(lst); set.unlock(); count += lst.cnt(); lst.reset(); @@ -3097,9 +3735,13 @@ scanupdateindex(Par par, const ITab& itab) { const Tab& tab = par.tab(); for (unsigned i = 0; i < par.m_subsubloop; i++) { - BSet bset(tab, itab, par.m_rows); - bset.calc(par); - CHK(scanupdateindex(par, itab, bset) == 0); + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + bset.calc(par); + CHK(scanupdateindex(par, itab, bset) == 0); + } else { + CHK(hashindexupdate(par, itab) == 0); + } } return 0; } @@ -3151,8 +3793,12 @@ readverifyfull(Par par) unsigned i = par.m_no - 1; if (i < tab.m_itabs && tab.m_itab[i] != 0) { const ITab& itab = *tab.m_itab[i]; - BSet bset(tab, itab, par.m_rows); - CHK(scanreadindex(par, itab, bset, false) == 0); + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + CHK(scanreadindex(par, itab, bset, false) == 0); + } else { + CHK(hashindexread(par, itab) == 0); + } } } return 0; @@ -3162,6 +3808,11 @@ static int readverifyindex(Par par) { par.m_verify = true; + unsigned sel = urandom(10); + if (sel < 9) { + par.m_ordered = true; + par.m_descending = (sel < 5); + } CHK(scanreadindex(par) == 0); return 0; } @@ -3169,26 +3820,56 @@ readverifyindex(Par par) static int pkops(Par par) { + const Tab& tab = par.tab(); par.m_randomkey = true; for (unsigned i = 0; i < par.m_subsubloop; i++) { + unsigned j = 0; + while (j < tab.m_itabs) { + if (tab.m_itab[j] != 0) { + const ITab& itab = *tab.m_itab[j]; + if (itab.m_type == ITab::UniqueHashIndex && urandom(5) == 0) + break; + } + j++; + } unsigned sel = urandom(10); if (par.m_slno % 2 == 0) { // favor insert if (sel < 8) { CHK(pkinsert(par) == 0); } else if (sel < 9) { - CHK(pkupdate(par) == 0); + if (j == tab.m_itabs) + CHK(pkupdate(par) == 0); + else { + const ITab& itab = *tab.m_itab[j]; + CHK(hashindexupdate(par, itab) == 0); + } } else { - CHK(pkdelete(par) == 0); + if (j == tab.m_itabs) + CHK(pkdelete(par) == 0); + else { + const ITab& itab = *tab.m_itab[j]; + CHK(hashindexdelete(par, itab) == 0); + } } } else { // favor delete if (sel < 1) { CHK(pkinsert(par) == 0); } else if (sel < 2) { - CHK(pkupdate(par) == 0); + if (j == tab.m_itabs) + CHK(pkupdate(par) == 0); + else { + const ITab& itab = *tab.m_itab[j]; + CHK(hashindexupdate(par, itab) == 0); + } } else { - CHK(pkdelete(par) == 0); + if (j == tab.m_itabs) + CHK(pkdelete(par) == 0); + else { + const ITab& itab 
= *tab.m_itab[j]; + CHK(hashindexdelete(par, itab) == 0); + } } } } @@ -3208,6 +3889,10 @@ pkupdatescanread(Par par) CHK(scanreadtable(par) == 0); } else { par.m_verify = false; + if (sel < 8) { + par.m_ordered = true; + par.m_descending = (sel < 7); + } CHK(scanreadindex(par) == 0); } return 0; @@ -3227,6 +3912,10 @@ mixedoperations(Par par) } else if (sel < 6) { CHK(scanupdatetable(par) == 0); } else { + if (sel < 8) { + par.m_ordered = true; + par.m_descending = (sel < 7); + } CHK(scanupdateindex(par) == 0); } return 0; @@ -3720,7 +4409,7 @@ printtables() { Par par(g_opt); makebuiltintables(par); - ndbout << "builtin tables (index x0 is on table pk):" << endl; + ndbout << "builtin tables (x0 on pk, x=ordered z=hash):" << endl; for (unsigned j = 0; j < tabcount; j++) { if (tablist[j] == 0) continue; @@ -3744,6 +4433,7 @@ runtest(Par par) LL1("random seed: " << seed); srandom((unsigned)seed); } else if (par.m_seed != 0) + LL1("random seed: " << par.m_seed); srandom(par.m_seed); // cs assert(par.m_csname != 0); @@ -3953,7 +4643,8 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) if (strcmp(arg, "-threads") == 0) { if (++argv, --argc > 0) { g_opt.m_threads = atoi(argv[0]); - continue; + if (1 <= g_opt.m_threads) + continue; } } if (strcmp(arg, "-v") == 0) { @@ -3970,7 +4661,7 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535) printhelp(); goto wrongargs; } - ndbout << "testOIBasic: unknown option " << arg; + ndbout << "testOIBasic: bad or unknown option " << arg; goto usage; } { From 19ee81dfb989c39ff25259412703162988f4d291 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 20 Dec 2004 20:17:36 +0300 Subject: [PATCH 14/21] Fixed compilation for old Linux distributions that have no HugeTLB support --- innobase/os/os0proc.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/innobase/os/os0proc.c b/innobase/os/os0proc.c index dd2037695b7..167aed93de7 100644 --- a/innobase/os/os0proc.c +++ b/innobase/os/os0proc.c @@ -532,7 +532,7 @@ os_mem_alloc_large( ibool assert_on_error) /* in: if TRUE, we crash mysqld if the memory cannot be allocated */ { -#ifdef UNIV_LINUX +#ifdef HAVE_LARGE_PAGES ulint size; int shmid; void *ptr = NULL; @@ -541,7 +541,8 @@ os_mem_alloc_large( if (!os_use_large_pages || !os_large_page_size) { goto skip; } - + +#ifdef UNIV_LINUX /* Align block size to os_large_page_size */ size = ((n - 1) & ~(os_large_page_size - 1)) + os_large_page_size; @@ -561,6 +562,7 @@ os_mem_alloc_large( */ shmctl(shmid, IPC_RMID, &buf); } +#endif if (ptr) { if (set_to_zero) { @@ -573,8 +575,8 @@ os_mem_alloc_large( } fprintf(stderr, "InnoDB HugeTLB: Warning: Using conventional memory pool\n"); -#endif skip: +#endif /* HAVE_LARGE_PAGES */ return(ut_malloc_low(n, set_to_zero, assert_on_error)); } @@ -587,8 +589,12 @@ os_mem_free_large( /*=================*/ void *ptr) /* in: number of bytes */ { +#ifdef HAVE_LARGE_PAGES + if (os_use_large_pages && os_large_page_size #ifdef UNIV_LINUX - if (os_use_large_pages && os_large_page_size && !shmdt(ptr)) { + && !shmdt(ptr) +#endif + ) { return; } #endif From b4c941bd7d6814964c0297bf799179f4a980d225 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 07:51:17 +0100 Subject: [PATCH 15/21] ndb - autotest bug fixes ndb/test/ndbapi/testDict.cpp: Drop table at end of testDict createTableWhenDbIsFull --- ndb/test/ndbapi/testDict.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/ndb/test/ndbapi/testDict.cpp 
 b/ndb/test/ndbapi/testDict.cpp index 9a33601c85a..221c035e368 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -125,6 +125,16 @@ int runCreateTheTable(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runDropTheTable(NDBT_Context* ctx, NDBT_Step* step){ + Ndb* pNdb = GETNDB(step); + const NdbDictionary::Table* pTab = ctx->getTab(); + + // Try to drop the table from db + pNdb->getDictionary()->dropTable(pTab->getName()); + + return NDBT_OK; +} + int runCreateTableWhenDbIsFull(NDBT_Context* ctx, NDBT_Step* step){ Ndb* pNdb = GETNDB(step); int result = NDBT_OK; @@ -1584,7 +1594,7 @@ TESTCASE("CreateTableWhenDbIsFull", INITIALIZER(runFillTable); INITIALIZER(runCreateTableWhenDbIsFull); INITIALIZER(runDropTableWhenDbIsFull); - FINALIZER(runClearTable); + FINALIZER(runDropTheTable); } TESTCASE("FragmentTypeSingle", "Create the table with fragment type Single\n"){ From 10ea49c7186143b5fabea97f8be5a4c93c1e0ae3 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 08:40:01 +0100 Subject: [PATCH 16/21] ndb - testIndex -n NFNR3 ndb/test/ndbapi/testIndex.cpp: Use stop and wait to capture error during restart ndb/test/src/UtilTransactions.cpp: Add extra check for readRowFromIndex failure due to losing locks in scan --- ndb/test/ndbapi/testIndex.cpp | 9 ++++++++- ndb/test/src/UtilTransactions.cpp | 23 ++++++++++++++++++----- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp index 25991ac375f..3526326b680 100644 --- a/ndb/test/ndbapi/testIndex.cpp +++ b/ndb/test/ndbapi/testIndex.cpp @@ -1277,7 +1277,7 @@ TESTCASE("CreateLoadDrop_O", TESTCASE("NFNR1", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); - //TC_PROPERTY("Threads", 2); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(runLoadTable); @@ -1292,6 +1292,7 @@ TESTCASE("NFNR1_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(runLoadTable); @@ -1305,6 +1306,7 @@ TESTCASE("NFNR1_O", TESTCASE("NFNR2", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1321,6 +1323,7 @@ TESTCASE("NFNR2_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1336,6 +1339,7 @@ TESTCASE("NFNR2_O", TESTCASE("NFNR3", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1351,6 +1355,7 @@ TESTCASE("NFNR3_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 2); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1365,6 +1370,7 @@ 
TESTCASE("NFNR3_O", TESTCASE("NFNR4", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 4); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); @@ -1383,6 +1389,7 @@ TESTCASE("NFNR4_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); + TC_PROPERTY("PauseThreads", 4); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp index f4ac466820f..a7c9751ed09 100644 --- a/ndb/test/src/UtilTransactions.cpp +++ b/ndb/test/src/UtilTransactions.cpp @@ -629,7 +629,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, parallelism = 1; while (true){ - +restart: if (retryAttempt >= retryMax){ g_info << "ERROR: has retried this operation " << retryAttempt << " times, failing!" << endl; @@ -719,11 +719,26 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, // ndbout << row.c_str().c_str() << endl; - if (readRowFromTableAndIndex(pNdb, pTrans, pIndex, row) != NDBT_OK){ + + while((eof= pOp->nextResult(false)) == 0); + if(eof == 2) + eof = pOp->nextResult(true); // this should give -1 + if(eof == -1) + { + const NdbError err = pTrans->getNdbError(); + + if (err.status == NdbError::TemporaryError){ + ERR(err); + pNdb->closeTransaction(pTrans); + NdbSleep_MilliSleep(50); + retryAttempt++; + goto restart; + } + } pNdb->closeTransaction(pTrans); return NDBT_FAILED; } @@ -736,7 +751,6 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb, pNdb->closeTransaction(pTrans); NdbSleep_MilliSleep(50); retryAttempt++; - rows--; continue; } ERR(err); @@ -811,7 +825,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, check = pOp->readTuple(); if( check == -1 ) { ERR(pTrans1->getNdbError()); - pNdb->closeTransaction(pTrans1); goto close_all; } @@ -943,7 +956,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb, #if VERBOSE printf("\n"); #endif - + scanTrans->refresh(); check = pTrans1->execute(Commit); if( check == -1 ) { const NdbError err = pTrans1->getNdbError(); From 7b09b67d7798008e4fbe9a452eff6530bc4c9eba Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 08:59:40 +0100 Subject: [PATCH 17/21] ndb - autotest fixes ndb/test/src/HugoTransactions.cpp: Fix indexRead (range) --- ndb/test/src/HugoTransactions.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 7a641afd8f3..d9207386bf0 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -1097,7 +1097,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, pNdb->closeTransaction(pTrans); return NDBT_FAILED; } - } else{ + } else { if(pIndexScanOp) { int rows_found = 0; @@ -1759,7 +1759,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, pNdb->closeTransaction(pTrans); return NDBT_FAILED; } - check = 0; + check = sOp->readTuples(); } if( check == -1 ) { @@ -1948,7 +1948,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, } if(ordered && check != 0){ - g_err << "Row: " << r << " not found!!" << endl; + g_err << check << " - Row: " << r << " not found!!" 
<< endl; pNdb->closeTransaction(pTrans); return NDBT_FAILED; } From 569310001fdaacb68e60d3433a490aaf6cd0fe68 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 09:36:27 +0100 Subject: [PATCH 18/21] ndb - testNdbApi -n UpdatesWithoutValues ndb/test/ndbapi/testNdbApi.cpp: testNdbApi -n UpdatesWithoutValues --- ndb/test/ndbapi/testNdbApi.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp index a1ebac609b6..c0393403760 100644 --- a/ndb/test/ndbapi/testNdbApi.cpp +++ b/ndb/test/ndbapi/testNdbApi.cpp @@ -799,13 +799,13 @@ int runUpdateWithoutValues(NDBT_Context* ctx, NDBT_Step* step){ // Dont' call any setValues - // Execute should not work + // Execute should work int check = pCon->execute(Commit); if (check == 0){ ndbout << "execute worked" << endl; - result = NDBT_FAILED; } else { ERR(pCon->getNdbError()); + result = NDBT_FAILED; } pNdb->closeTransaction(pCon); From 6436cab8382a6e609c3f3866a9515339517abff8 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 11:21:27 +0100 Subject: [PATCH 19/21] ndb - testTransactions ndb/test/run-test/daily-basic-tests.txt: Run testTransactions on table explicitly to avoid unhandled error --- ndb/test/run-test/daily-basic-tests.txt | 70 +++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 3 deletions(-) diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 837d0ee195f..fed7b49cec7 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -465,9 +465,73 @@ max-time: 150000 cmd: testOperations args: -max-time: 150000 +max-time: 1500 cmd: testTransactions -args: +args: T1 + +max-time: 1500 +cmd: testTransactions +args: T2 + +max-time: 1500 +cmd: testTransactions +args: T3 + +max-time: 1500 +cmd: testTransactions +args: T4 + +max-time: 1500 +cmd: testTransactions +args: T5 + +max-time: 1500 +cmd: testTransactions +args: T6 + +max-time: 1500 +cmd: testTransactions +args: T7 + +max-time: 1500 +cmd: testTransactions +args: T8 + +max-time: 1500 +cmd: testTransactions +args: T9 + +max-time: 1500 +cmd: testTransactions +args: T10 + +max-time: 1500 +cmd: testTransactions +args: T11 + +max-time: 1500 +cmd: testTransactions +args: T12 + +max-time: 1500 +cmd: testTransactions +args: T13 + +max-time: 1500 +cmd: testTransactions +args: T14 + +max-time: 1500 +cmd: testTransactions +args: I1 + +max-time: 1500 +cmd: testTransactions +args: I2 + +max-time: 1500 +cmd: testTransactions +args: I3 max-time: 1500 cmd: testRestartGci @@ -477,7 +541,7 @@ max-time: 600 cmd: testBlobs args: -max-time: 2500 +max-time: 5000 cmd: testOIBasic args: From 54b1873084abec2b4b9492d77afa07207ed85043 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 20:52:52 +1000 Subject: [PATCH 20/21] Changes in config for generation of NDB API docs: don't include header files or class private members; do include class static members. ndb/docs/doxygen/Doxyfile.mgmapi: Disable inclusion of headers. Enable inclusion of static members. ndb/docs/doxygen/Doxyfile.ndbapi: Disable inclusion of headers. Enable inclusion of static members. ndb/docs/doxygen/Doxyfile.ndb: Disable inclusion of headers. Disable inclusion of private members. ndb/docs/doxygen/Doxyfile.odbc: Disable inclusion of headers. Disable inclusion of private members. ndb/docs/doxygen/Doxyfile.test: Disable inclusion of headers. Disable inclusion of private members. 
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + ndb/docs/doxygen/Doxyfile.mgmapi | 4 ++-- ndb/docs/doxygen/Doxyfile.ndb | 4 ++-- ndb/docs/doxygen/Doxyfile.ndbapi | 4 ++-- ndb/docs/doxygen/Doxyfile.odbc | 4 ++-- ndb/docs/doxygen/Doxyfile.test | 4 ++-- 6 files changed, 11 insertions(+), 10 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 778911f1479..fb38218dc8c 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -94,6 +94,7 @@ jcole@sarvik.tfr.cafe.ee jcole@tetra.spaceapes.com jimw@mysql.com joerg@mysql.com +jon@gigan. joreland@mysql.com jorge@linux.jorge.mysql.com jplindst@t41.(none) diff --git a/ndb/docs/doxygen/Doxyfile.mgmapi b/ndb/docs/doxygen/Doxyfile.mgmapi index 4287b37fd97..9db58393ffc 100644 --- a/ndb/docs/doxygen/Doxyfile.mgmapi +++ b/ndb/docs/doxygen/Doxyfile.mgmapi @@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. -EXTRACT_STATIC = NO +EXTRACT_STATIC = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. @@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.ndb b/ndb/docs/doxygen/Doxyfile.ndb index d43a66323f8..3db42ee78af 100644 --- a/ndb/docs/doxygen/Doxyfile.ndb +++ b/ndb/docs/doxygen/Doxyfile.ndb @@ -52,7 +52,7 @@ EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. -EXTRACT_PRIVATE = YES +EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. @@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.ndbapi b/ndb/docs/doxygen/Doxyfile.ndbapi index 61d58d4fea3..5ca09e4851d 100644 --- a/ndb/docs/doxygen/Doxyfile.ndbapi +++ b/ndb/docs/doxygen/Doxyfile.ndbapi @@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. -EXTRACT_STATIC = NO +EXTRACT_STATIC = YES # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. @@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. 
-VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.odbc b/ndb/docs/doxygen/Doxyfile.odbc index 93e052d5b9d..88c70b2ccf6 100644 --- a/ndb/docs/doxygen/Doxyfile.odbc +++ b/ndb/docs/doxygen/Doxyfile.odbc @@ -52,7 +52,7 @@ EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. -EXTRACT_PRIVATE = YES +EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. @@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation diff --git a/ndb/docs/doxygen/Doxyfile.test b/ndb/docs/doxygen/Doxyfile.test index 34ee21873ff..762013cc1cf 100644 --- a/ndb/docs/doxygen/Doxyfile.test +++ b/ndb/docs/doxygen/Doxyfile.test @@ -52,7 +52,7 @@ EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. -EXTRACT_PRIVATE = YES +EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. @@ -157,7 +157,7 @@ HIDE_SCOPE_NAMES = NO # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. -VERBATIM_HEADERS = YES +VERBATIM_HEADERS = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put list of the files that are included by a file in the documentation From b58224a54f309ca236e31c698e948797e194ab03 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 21 Dec 2004 13:21:59 +0100 Subject: [PATCH 21/21] Updated docs --- ndb/include/ndbapi/Ndb.hpp | 438 +++++++++++++-------------- ndb/include/ndbapi/NdbConnection.hpp | 34 ++- ndb/include/ndbapi/NdbDictionary.hpp | 9 +- 3 files changed, 237 insertions(+), 244 deletions(-) diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp index 5ec09269695..b3475841c87 100644 --- a/ndb/include/ndbapi/Ndb.hpp +++ b/ndb/include/ndbapi/Ndb.hpp @@ -17,30 +17,37 @@ /** @mainpage NDB API Programmers' Guide - This guide assumes a basic familiarity with NDB Cluster concepts. + This guide assumes a basic familiarity with MySQL Cluster concepts. Some of the fundamental ones are described in section @ref secConcepts. - The NDB API is an NDB Cluster application interface - that implements both synchronous and asynchronous transactions. + The NDB API is an MySQL Cluster application interface + that implements transactions. The NDB API consists of the following fundamental classes: + - Ndb_cluster_connection class representing a connection to a cluster, - Ndb is the main class representing the database, - NdbConnection represents a transaction, - - NdbOperation represents a transaction operation using primary key, - - NdbIndexOperation represents a transaction operation using a secondary - index, + - NdbOperation represents a operation using primary key, + - NdbScanOperation represents a operation performing a full table scan. 
 + - NdbIndexOperation represents an operation using a unique hash index, + - NdbIndexScanOperation represents an operation performing a scan using + an ordered index, - NdbRecAttr represents the value of an attribute, and - NdbDictionary represents meta information about tables and attributes. - - NdbError represents an error condition + - NdbError contains a specification of an error. There are also some auxiliary classes. The main structure of an application program is as follows: + -# Construct and connect to a cluster using the Ndb_cluster_connection + object. -# Construct and initialize Ndb object(s). - -# Define and execute (synchronous or asynchronous) transactions. + -# Define and execute transactions using NdbConnection and Ndb*Operation. -# Delete Ndb objects + -# Delete connection to cluster The main structure of a transaction is as follows: - -# Start transaction - -# Add and define operations (associated with the transaction) + -# Start transaction, an NdbConnection + -# Add and define operations (associated with the transaction), + Ndb*Operation -# Execute transaction The execute can be of two different types, @@ -71,8 +78,8 @@ At this step the transaction is being defined. It is not yet sent to the NDB kernel. -# Add and define operations to the transaction - (using NdbConnection::getNdbOperation and - methods from class NdbOperation). + (using NdbConnection::getNdb*Operation and + methods from class Ndb*Operation). The transaction is still not sent to the NDB kernel. -# Execute the transaction (using NdbConnection::execute). -# Close the transaction (using Ndb::closeTransaction). @@ -82,20 +89,21 @@ To execute several parallel synchronous transactions, one can either use multiple Ndb objects in several threads or start multiple applications programs. + +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL Another way to execute several parallel transactions is to use asynchronous transactions. - +#endif @section secNdbOperations Operations Each transaction (NdbConnection object) consist of a list of - operations (NdbOperation or NdbIndexOperation objects. - NdbIndexOperation is used for accessing tables through secondary indexes). + operations (Ndb*Operation objects). Operations are of two different kinds: -# standard operations, and -# interpreted program operations. - 
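 Before turning to the two kinds of operations, the application and transaction structure described above can be sketched in code roughly as follows (a minimal sketch only: error handling is reduced to the APIERROR macro used in @ref ndbapi_example1.cpp, and the database name "TEST_DB" as well as the connect retry arguments are illustrative assumptions, not part of this patch): @code Ndb_cluster_connection cluster_connection; // 1. connect to the cluster if (cluster_connection.connect(4, 5, 1) != 0) // retries, delay, verbosity return -1; Ndb myNdb(&cluster_connection, "TEST_DB"); // 2. construct and initialize if (myNdb.init() != 0) // an Ndb object return -1; NdbConnection* myTrans = myNdb.startTransaction(); // 3. start a transaction // ... add and define Ndb*Operation objects here ... if (myTrans->execute(Commit) == -1) // execute with Commit APIERROR(myTrans->getNdbError()); myNdb.closeTransaction(myTrans); // 4. close the transaction @endcode 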

Standard Operations

+

Single row operations

After the operation is created using NdbConnection::getNdbOperation (or NdbConnection::getNdbIndexOperation), it is defined in the following three steps: @@ -106,36 +114,42 @@ -# Specify attribute actions (e.g. using NdbOperation::getValue) - Example code (using an NdbOperation): + Example code (using an NdbOperation and excluding error handling): @code - MyOperation = MyConnection->getNdbOperation("MYTABLENAME"); // 1. Create - if (MyOperation == NULL) APIERROR(MyConnection->getNdbError()); + // 1. Create + MyOperation= MyConnection->getNdbOperation("MYTABLENAME"); - MyOperation->readTuple(); // 2. Define type of operation - MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions + // 2. Define type of operation and lock mode + MyOperation->readTuple(NdbOperation::LM_Read); + + // 3. Specify Search Conditions + MyOperation->equal("ATTR1", i); - MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions - if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError()); + // 4. Attribute Actions + MyRecAttr= MyOperation->getValue("ATTR2", NULL); @endcode - For more examples, see @ref ndbapi_example1.cpp and @ref ndbapi_example2.cpp. + For more examples, see @ref ndbapi_example1.cpp and + @ref ndbapi_example2.cpp. - Example code using an NdbIndexOperation: + Example code (using an NdbIndexOperation and excluding error handling): @code - MyOperation = // 1. Create - MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME"); - if (MyOperation == NULL) APIERROR(MyConnection->getNdbError()); + // 1. Create + MyOperation= MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME"); - MyOperation->readTuple(); // 2. Define type of operation - MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions + // 2. Define type of operation and lock mode + MyOperation->readTuple(NdbOperation::LM_Read); - MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions - if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError()); + // 3. Specify Search Conditions + MyOperation->equal("ATTR1", i); + + // 4. Attribute Actions + MyRecAttr = MyOperation->getValue("ATTR2", NULL); @endcode For more examples, see @ref ndbapi_example4.cpp. -
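 The operation defined in these steps is not carried out until NdbConnection::execute is called. A complete single-row read therefore finishes like this (a sketch continuing the example above: MyNdb is assumed to be the Ndb object from which MyConnection was started, and u_32_value() assumes ATTR2 is a 32-bit unsigned column): @code // 5. Execute the transaction and fetch the result if (MyConnection->execute(Commit) == -1) APIERROR(MyConnection->getNdbError()); ndbout << "ATTR2: " << MyRecAttr->u_32_value() << endl; // 6. Return the NdbConnection object to the Ndb object MyNdb.closeTransaction(MyConnection); @endcode 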

Step 1: Define Standard Operation Type

- The following types of standard operations exist: +

Step 1: Define single row operation type

+ The following types of operations exist: -# NdbOperation::insertTuple : inserts a non-existing tuple -# NdbOperation::writeTuple : @@ -146,79 +160,26 @@ -# NdbOperation::deleteTuple : deletes an existing tuple -# NdbOperation::readTuple : - reads an existing tuple - -# NdbOperation::readTupleExclusive : - reads an existing tuple using an exclusive lock - -# NdbOperation::simpleRead : - reads an existing tuple (using shared read lock), - but releases lock immediately after read - -# NdbOperation::committedRead : - reads committed tuple - -# NdbOperation::dirtyUpdate : - updates an existing tuple, but releases lock immediately - after read (uses dirty lock) - -# NdbOperation::dirtyWrite : - updates or writes a tuple, but releases lock immediately - after read (uses dirty lock) + reads an existing tuple with specified lock mode All of these operations operate on the unique tuple key. (When NdbIndexOperation is used then all of these operations - operate on a defined secondary index.) - - - Some comments: - - NdbOperation::simpleRead and - NdbOperation::committedRead can execute on the same transaction - as the above operations but will release its locks immediately - after reading the tuple. - NdbOperation::simpleRead will always read the latest version - of the tuple. - Thus it will wait until it can acquire a shared read lock on - the tuple. - NdbOperation::committedRead will read the latest committed - version of the tuple. -
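 As an illustration of the writing operation types, a single-row update is defined in the same three steps, with NdbOperation::setValue supplying the new attribute value (a sketch; the table and attribute names are the same placeholders as in the examples above, and i and aValue are placeholder variables): @code // 1. Create and define type of operation MyOperation = MyConnection->getNdbOperation("MYTABLENAME"); MyOperation->updateTuple(); // 2. Specify search condition (the tuple key) MyOperation->equal("ATTR1", i); // 3. Attribute action: set the new value MyOperation->setValue("ATTR2", aValue); @endcode 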
- Both NdbOperation::simpleRead and NdbOperation::committedRead - are examples of consistent reads which are not repeatable. - All reads read the latest version if updates were made by the same - transaction. - Errors on simple read are only reported by the NdbOperation object. - These error codes are not transferred to the NdbConnection object. - - NdbOperation::dirtyUpdate and NdbOperation::dirtyWrite - will execute in the same transaction - but will release the lock immediately after updating the - tuple. - It will wait on the lock until it can acquire an exclusive - write lock. - In a replicated version of NDB Cluster NdbOperation::dirtyUpdate - can lead to inconsistency between the replicas. - Examples of when it could be used is - to update statistical counters on tuples which are "hot-spots". + operate on a defined unique hash index.) @note If you want to define multiple operations within the same transaction, - then you need to call NdbConnection::getNdbOperation - (or NdbConnection::getNdbIndexOperation) for each + then you need to call NdbConnection::getNdb*Operation for each operation. -

Step 2: Specify Search Conditions

The search condition is used to select tuples. - (In the current NdbIndexOperation implementation - this means setting the value of - the secondary index attributes of the wanted tuple.) - If a tuple identity is used, then NdbOperation::setTupleId - is used to define the search key when inserting new tuples. - Otherwise, NdbOperation::equal is used. - - For NdbOperation::insertTuple it is also allowed to define the + For NdbOperation::insertTuple it is also allowed to define the search key by using NdbOperation::setValue. The NDB API will automatically detect that it is supposed to use NdbOperation::equal instead. For NdbOperation::insertTuple it is not necessary to use NdbOperation::setValue on key attributes before other attributes. -
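 For example, the following two insert definitions are equivalent; in the second one the NDB API detects that ATTR1 is a key attribute and internally treats the setValue call as an equal (a sketch; MyOperation and MySecondOperation are two separate operation objects, and the names are the usual placeholders): @code // Key attribute defined with equal() MyOperation->insertTuple(); MyOperation->equal("ATTR1", i); MyOperation->setValue("ATTR2", aValue); // Key attribute defined with setValue(); detected automatically MySecondOperation->insertTuple(); MySecondOperation->setValue("ATTR1", i); MySecondOperation->setValue("ATTR2", aValue); @endcode 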

Step 3: Specify Attribute Actions

Now it is time to define which attributes should be read or updated. Deletes can neither read nor set values, read can only read values and @@ -495,7 +456,7 @@ should match the automatic numbering to make it easier to debug the interpreted program. - +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL @section secAsync Asynchronous Transactions The asynchronous interface is used to increase the speed of transaction executing by better utilizing the connection @@ -583,7 +544,7 @@ The poll method returns the number of transactions that have finished processing and executed their callback methods. - + @note When an asynchronous transaction has been started and sent to the NDB kernel, it is not allowed to execute any methods on objects belonging to this transaction until the transaction @@ -595,7 +556,7 @@ More about how transactions are send the NDB Kernel is available in section @ref secAdapt. - +#endif @section secError Error Handling @@ -671,6 +632,7 @@ * @include ndbapi_example4.cpp */ +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** * @page select_all.cpp select_all.cpp * @include select_all.cpp @@ -680,6 +642,7 @@ * @page ndbapi_async.cpp ndbapi_async.cpp * @include ndbapi_async.cpp */ +#endif /** * @page ndbapi_scan.cpp ndbapi_scan.cpp @@ -691,8 +654,7 @@ @page secAdapt Adaptive Send Algorithm At the time of "sending" the transaction - (using NdbConnection::execute, NdbConnection::executeAsynch, - Ndb::sendPreparedTransactions, or Ndb::sendPollNdb), the transactions + (using NdbConnection::execute), the transactions are in reality not immediately transfered to the NDB Kernel. Instead, the "sent" transactions are only kept in a special send list (buffer) in the Ndb object to which they belong. @@ -847,12 +809,56 @@ then a timeout error occurs. Concurrent transactions (parallel application programs, thread-based - applications, or applications with asynchronous transactions) + applications) sometimes deadlock when they try to access the same information. Applications need to be programmed so that timeout errors occurring due to deadlocks are handled. This generally means that the transaction encountering timeout should be rolled back and restarted. + + @section secHint Hints and performance + + NDB API can be hinted to select a particular transaction coordinator. + The default method is round robin where each set of new transactions + is placed on the next NDB kernel node. + By providing a distribution key (usually the primary key + of the mostly used table of the transaction) for a record + the transaction will be placed on the node where the primary replica + of that record resides. + Note that this is only a hint, the system can + be under reconfiguration and then the NDB API + will use select the transaction coordinator without using + this hint. + + Placing the transaction coordinator close + to the actual data used in the transaction can in many cases + improve performance significantly. This is particularly true for + systems using TCP/IP. A system using Solaris and a 500 MHz processor + has a cost model for TCP/IP communication which is: + + 30 microseconds + (100 nanoseconds * no of Bytes) + + This means that if we can ensure that we use "popular" links we increase + buffering and thus drastically reduce the communication cost. + Systems using SCI has a different cost model which is: + + 5 microseconds + (10 nanoseconds * no of Bytes) + + Thus SCI systems are much less dependent on selection of + transaction coordinators. 
+ Typically TCP/IP systems spend 30-60% of the time during communication, + whereas SCI systems typically spend 5-10% of the time during + communication. + Thus SCI means that less care from the NDB API programmer is + needed and great scalability can be achieved even for applications using + data from many parts of the database. + + A simple example is an application that uses many simple updates where + a transaction needs to update one record. + This record has a 32 bit primary key, + which is also the distribution key. + Then the keyData will be the address of the integer + of the primary key and keyLen will be 4. */ #ifndef Ndb_H @@ -945,6 +951,11 @@ public: * Semaphores, mutexes and so forth are easy ways of issuing memory * barriers without having to bother about the memory barrier concept. * + */ + +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL +// to be documented later +/* * If one Ndb object is used to handle parallel transactions through the * asynchronous programming interface, please read the notes regarding * asynchronous transactions (Section @ref secAsync). @@ -955,6 +966,8 @@ public: * asynchronous transaction or the methods for * synchronous transactions but not both. */ +#endif + class Ndb { friend class NdbReceiver; @@ -976,29 +989,30 @@ public: * @{ */ /** - * The starting point of your application code is to create an - * Ndb object. - * This object represents the NDB kernel and is the main - * object used in interaction with the NDB kernel. + * The Ndb object represents a connection to a database. * + * @note the init() method must be called before it may be used + * + * @param ndb_cluster_connection is a connection to a cluster containing + * the database to be used * @param aCatalogName is the name of the catalog you want to use. * @note The catalog name provides a name space for the tables and * indexes created in any connection from the Ndb object. * @param aSchemaName is the name of the schema you - * want to use. It is optional and defaults to the "def" schema. + * want to use. * @note The schema name provides an additional name space * for the tables and indexes created in a given catalog. - * @note The methods get/setDatabaseName and get/setDatabaseSchemaName - * are equivalent to get/setCatalogName and get/setSchemaName. - * The get/setDatabaseName and get/setDatabaseSchemaName are - * deprecated. */ - Ndb(const char* aCatalogName = "", const char* aSchemaName = "def"); Ndb(Ndb_cluster_connection *ndb_cluster_connection, const char* aCatalogName = "", const char* aSchemaName = "def"); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // depricated + Ndb(const char* aCatalogName = "", const char* aSchemaName = "def"); +#endif ~Ndb(); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** * The current catalog name can be fetched by getCatalogName. * @@ -1026,7 +1040,7 @@ public: * @param aSchemaName is the new name of the current schema */ void setSchemaName(const char * aSchemaName); - +#endif /** * The current database name can be fetched by getDatabaseName. @@ -1057,22 +1071,22 @@ public: void setDatabaseSchemaName(const char * aDatabaseSchemaName); /** - * Before anything else it is necessary to initialize (start) - * the Ndb object. + * Initializes the Ndb object * * @param maxNoOfTransactions * Maximum number of parallel - * NdbConnection objects that should be handled by the Ndb object. - * A value larger than 1024 will be downgraded to 1024. - * This means that one Ndb object can handle at most 1024 parallel - * transactions. - * @return 0 if successful, -1 otherwise. 
+ * NdbConnection objects that can be handled by the Ndb object. + * Maximum value is 1024. * - * @note The internal implementation multiplies this value - * with 3. + * @note each scan or index scan operation uses one extra + * NdbConnection object + * + * @return 0 if successful, -1 otherwise. */ int init(int maxNoOfTransactions = 4); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // depricated /** * Wait for Ndb object to successfully set-up connections to * the NDB kernel. @@ -1085,8 +1099,8 @@ public: * @return 0: Ndb is ready and timeout has not occurred.
* -1: Timeout has expired */ - int waitUntilReady(int timeout = 60); +#endif /** @} *********************************************************************/ @@ -1096,30 +1110,55 @@ public: */ /** - * Query the database for schema information - * (without performing any transaction). + * Get an object for retrieving or manipulating database schema information + * + * @note this object operates outside any transaction * * @return Object containing meta information about all tables * in NDB Cluster. */ class NdbDictionary::Dictionary* getDictionary() const; - NdbEventOperation* createEventOperation(const char* eventName, - const int bufferLength); - int dropEventOperation(NdbEventOperation*); - void monitorEvent(NdbEventOperation *, NdbEventCallback, void*); - int pollEvents(int aMillisecondNumber); + + /** @} *********************************************************************/ + + /** + * @name Event subscriptions + * @{ + */ /** - * Get the application node identity. + * Create a subcription to an event defined in the database * - * Each node (DB nodes, Applications, and Management Servers) - * has its own node identity in the NDB Cluster. - * See documentation for the management server configuration file. + * @param eventName + * unique identifier of the event + * @param bufferLength + * buffer size for storing event data * - * @return Node id of this application. + * @return Object representing an event, NULL on failure */ - int getNodeId(); + NdbEventOperation* createEventOperation(const char* eventName, + const int bufferLength); + /** + * Drop a subscription to an event + * + * @param eventName + * unique identifier of the event + * + * @return 0 on success + */ + int dropEventOperation(NdbEventOperation* eventName); + + /** + * Wait for an event to occur. Will return as soon as an event + * is detected on any of the created events. + * + * @param aMillisecondNumber + * maximum time to wait + * + * @return the number of events that has occured, -1 on failure + */ + int pollEvents(int aMillisecondNumber); /** @} *********************************************************************/ @@ -1129,71 +1168,19 @@ public: */ /** - * This method returns an NdbConnection which caters for the transaction. - * When the transaction is completed it must be closed. - * The Ndb::closeTransaction also return the NdbConnection object - * and all other memory related to the transaction. - * Failure to close the transaction will lead to memory leakage. - * The transaction must be closed independent of its outcome, i.e. - * even if there is an error. + * Start a transaction + * + * @note When the transaction is completed it must be closed using + * Ndb::closeTransaction or NdbConnection::close. + * The transaction must be closed independent of its outcome, i.e. + * even if there is an error. + * + * @param prio Not implemented + * @param keyData Pointer to partition key to be used for deciding + * which node to run the Transaction Coordinator on + * @param keyLen Length of partition key expressed in bytes * - * NDB API can be hinted to select a particular transaction coordinator. - * The default method is round robin where each set of new transactions - * is placed on the next NDB kernel node. - * By providing a distribution key (usually the primary key - * of the mostly used table of the transaction) for a record - * the transaction will be placed on the node where the primary replica - * of that record resides. 
- * Note that this is only a hint, the system can - * be under reconfiguration and then the NDB API - * will use select the transaction coordinator without using - * this hint. - * - * Placing the transaction coordinator close - * to the actual data used in the transaction can in many cases - * improve performance significantly. This is particularly true for - * systems using TCP/IP. A system using Solaris and a 500 MHz processor - * has a cost model for TCP/IP communication which is: - * - * 30 microseconds + (100 nanoseconds * no of Bytes) - * - * This means that if we can ensure that we use "popular" links we increase - * buffering and thus drastically reduce the communication cost. - * Systems using SCI has a different cost model which is: - * - * 5 microseconds + (10 nanoseconds * no of Bytes) - * - * Thus SCI systems are much less dependent on selection of - * transaction coordinators. - * Typically TCP/IP systems spend 30-60% of the time during communication, - * whereas SCI systems typically spend 5-10% of the time during - * communication. - * Thus SCI means that less care from the NDB API programmer is - * needed and great scalability can be achieved even for applications using - * data from many parts of the database. - * - * A simple example is an application that uses many simple updates where - * a transaction needs to update one record. - * This record has a 32 bit primary key, - * which is also the distribution key. - * Then the keyData will be the address of the integer - * of the primary key and keyLen will be 4. - * - * @note Transaction priorities are not yet supported. - * - * @param prio The priority of the transaction.
- * Priority 0 is the highest priority and is used - * for short transactions with requirements on low delay.
- * Priority 1 is a medium priority for short transactions. - *
- * Priority 2 is a medium priority for long transactions.
- * Priority 3 is a low priority for long transactions.
- * This parameter is not currently used, - * and can be set to any value - * @param keyData Pointer to distribution key - * @param keyLen Length of distribution key expressed in bytes - * - * @return NdbConnection object, or NULL if method failed. + * @return NdbConnection object, or NULL on failure. */ NdbConnection* startTransaction(Uint32 prio = 0, const char * keyData = 0, @@ -1233,7 +1220,10 @@ public: #endif /** - * When a transactions is completed, the transaction has to be closed. + * Close a transaction. + * + * @note should be called after the transaction has completed, irrespective + * of success or failure * * @note It is not allowed to call Ndb::closeTransaction after sending the * transaction asynchronously with either @@ -1245,10 +1235,11 @@ public: * If the transaction is not committed it will be aborted. */ void closeTransaction(NdbConnection* aConnection); - /** @} *********************************************************************/ +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // to be documented later /** * @name Asynchronous Transactions * @{ @@ -1259,11 +1250,10 @@ public: * Will return as soon as at least 'minNoOfEventsToWakeUp' * of them have completed, or the maximum time given as timeout has passed. * - * @param aMillisecondNumber Maximum time to wait for transactions - * to complete. - * Polling without wait is achieved by setting the - * timer to zero. - * Time is expressed in milliseconds. + * @param aMillisecondNumber + * Maximum time to wait for transactions to complete. Polling + * without wait is achieved by setting the timer to zero. + * Time is expressed in milliseconds. * @param minNoOfEventsToWakeup Minimum number of transactions * which has to wake up before the poll-call will return. * If minNoOfEventsToWakeup is @@ -1325,6 +1315,7 @@ public: int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT, int minNoOfEventsToWakeup = 1, int forceSend = 0); +#endif /** @} *********************************************************************/ @@ -1336,7 +1327,7 @@ public: /** * Get the NdbError object * - * The NdbError object is valid until you call a new NDB API method. + * @note The NdbError object is valid until a new NDB API method is called. */ const NdbError & getNdbError() const; @@ -1348,37 +1339,36 @@ public: const NdbError & getNdbError(int errorCode); + /** @} *********************************************************************/ + +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + /** + * Get the application node identity. + * + * @return Node id of this application. + */ + int getNodeId(); + /** * setConnectString - * @param connectString - the connectString has the following format: - * @code - * "nodeid=;host=host://:; - * host=host://:;..." - * @endcode - * or - * @code - * "nodeid=;host=:;host=:;..." - * @endcode + * + * @param connectString - see MySQL ref manual for format */ static void setConnectString(const char * connectString); bool usingFullyQualifiedNames(); - /** @} *********************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** * Different types of tampering with the NDB Cluster. * Only for debugging purposes only. 
*/ enum TamperType { - LockGlbChp = 1, ///< Lock GCP - UnlockGlbChp, ///< Unlock GCP - CrashNode, ///< Crash an NDB node - ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster - InsertError ///< Execute an error in NDB Cluster - ///< (may crash system) + LockGlbChp = 1, ///< Lock GCP + UnlockGlbChp, ///< Unlock GCP + CrashNode, ///< Crash an NDB node + ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster + InsertError ///< Execute an error in NDB Cluster + ///< (may crash system) }; /** @@ -1397,9 +1387,7 @@ public: * on type of tampering. */ int NdbTamper(TamperType aAction, int aNode); -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** * Return a unique tuple id for a table. The id sequence is * ascending but may contain gaps. @@ -1429,9 +1417,7 @@ public: bool increase); bool setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase); Uint64 opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op); -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /** */ NdbConnection* hupp( NdbConnection* ); diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp index f0f44170ed7..80500dee225 100644 --- a/ndb/include/ndbapi/NdbConnection.hpp +++ b/ndb/include/ndbapi/NdbConnection.hpp @@ -31,6 +31,8 @@ class Ndb; class NdbBlob; +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL +// to be documented later /** * NdbAsynchCallback functions are used when executing asynchronous * transactions (using NdbConnection::executeAsynchPrepare, or @@ -39,6 +41,7 @@ class NdbBlob; * See @ref secAsync for more information. */ typedef void (* NdbAsynchCallback)(int, NdbConnection*, void*); +#endif /** * Commit type of transaction @@ -184,7 +187,8 @@ public: * @note All operations within the same transaction need to * be initialized with this method. * - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable) + * @param aTable + * A table object (fetched by NdbDictionary::Dictionary::getTable) * @return Pointer to an NdbOperation object if successful, otherwise NULL. */ NdbOperation* getNdbOperation(const NdbDictionary::Table * aTable); @@ -204,7 +208,8 @@ public: * get the NdbConnection object which * was fetched by startTransaction pointing to this operation. * - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable) + * @param aTable + * A table object (fetched by NdbDictionary::Dictionary::getTable) * @return pointer to an NdbOperation object if successful, otherwise NULL */ NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * aTable); @@ -226,12 +231,15 @@ public: * get the NdbConnection object which * was fetched by startTransaction pointing to this operation. * - * @param anIndex An index object (fetched by NdbDictionary::Dictionary::getIndex). - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable). + * @param anIndex + An index object (fetched by NdbDictionary::Dictionary::getIndex). + * @param aTable + A table object (fetched by NdbDictionary::Dictionary::getTable). * @return pointer to an NdbOperation object if successful, otherwise NULL */ - NdbIndexScanOperation* getNdbIndexScanOperation(const NdbDictionary::Index * anIndex, - const NdbDictionary::Table * aTable); + NdbIndexScanOperation* getNdbIndexScanOperation + (const NdbDictionary::Index * anIndex, + const NdbDictionary::Table * aTable); /** * Get an operation from NdbIndexOperation idlelist and @@ -251,8 +259,10 @@ public: * get the NdbConnection object that * was fetched by startTransaction pointing to this operation. 
* - * @param anIndex An index object (fetched by NdbDictionary::Dictionary::getIndex). - * @param aTable A table object (fetched by NdbDictionary::Dictionary::getTable). + * @param anIndex + * An index object (fetched by NdbDictionary::Dictionary::getIndex). + * @param aTable + * A table object (fetched by NdbDictionary::Dictionary::getTable). * @return Pointer to an NdbIndexOperation object if * successful, otherwise NULL */ @@ -289,6 +299,8 @@ public: AbortOption abortOption = AbortOnError, int force = 0 ); +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + // to be documented later /** * Prepare an asynchronous transaction. * @@ -334,7 +346,7 @@ public: NdbAsynchCallback aCallback, void* anyObject, AbortOption abortOption = AbortOnError); - +#endif /** * Refresh * Update timeout counter of this transaction @@ -397,14 +409,14 @@ public: * (Note that there has to be an NdbConnection::execute call * with Ndb::Commit for the GCI to be available.) */ - int getGCI(); + int getGCI(); /** * Get transaction identity. * * @return Transaction id. */ - Uint64 getTransactionId(); + Uint64 getTransactionId(); /** * Returns the commit status of the transaction. diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 4acca0f3d96..476cc4699be 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -166,7 +166,7 @@ public: * The builtin column types */ enum Type { - Undefined=0,///< Undefined + Undefined=0, ///< Undefined Tinyint, ///< 8 bit. 1 byte signed integer, can be used in array Tinyunsigned, ///< 8 bit. 1 byte unsigned integer, can be used in array Smallint, ///< 16 bit. 2 byte signed integer, can be used in array @@ -373,16 +373,11 @@ public: #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL const Table * getBlobTable() const; - /** - * @name ODBC Specific methods - * @{ - */ - void setAutoIncrement(bool); + void setAutoIncrement(bool); bool getAutoIncrement() const; void setAutoIncrementInitialValue(Uint64 val); void setDefaultValue(const char*); const char* getDefaultValue() const; - /** @} *******************************************************************/ static const Column * FRAGMENT; static const Column * ROW_COUNT;