The table opening process now works the following way (see the sketch after this summary):
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
  object and open a handler to the table object
Other noteworthy changes:
- In TABLE_SHARE the most common strings are now LEX_STRING's
- Better error message when table is not found
- The variable 'table_cache' is now renamed to 'table_open_cache'
- New variable 'table_definition_cache' that is the number of table definitions that will be cached
- strxnmov() calls are now fixed to avoid overflows
- strxnmov() will now always add a terminating \0 to the result
- engine objects are now created with a TABLE_SHARE object instead of a TABLE object.
- After creating a field object one must call field->init(table) before using it
- For a busy system this change will give you:
 - Less memory usage per table object
 - Faster opening of tables (if the table has been in use or is in the table definition cache)
 - The ability to cache many table definition objects
 - Faster drop of table
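
The split between a cached TABLE_SHARE and the per-use TABLE objects can be pictured with a small self-contained model. This is only an editorial illustration of the idea above; the struct and function names are hypothetical and do not match the server's actual types:

/*
  Illustrative model only (hypothetical names, not the server's types):
  one cached definition object, many concurrent per-use table objects.
*/
#include <stdio.h>

struct table_share_model
{
  const char *db;
  const char *table_name;
  unsigned ref_count;              /* number of TABLE-like users of this share */
};

struct table_model
{
  struct table_share_model *s;     /* shared, cached definition */
  void *file;                      /* per-use storage engine handler would go here */
};

/* Roughly what "open a table from its cached share" means in this model */
static void open_from_share(struct table_model *t, struct table_share_model *share)
{
  t->s= share;
  t->file= NULL;
  share->ref_count++;
}

int main(void)
{
  struct table_share_model share= { "test", "t1", 0 };
  struct table_model a, b;

  open_from_share(&a, &share);     /* two users of test.t1 ... */
  open_from_share(&b, &share);     /* ... share one definition object */
  printf("%s.%s is used by %u table objects\n",
         share.db, share.table_name, share.ref_count);
  return 0;
}
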
mysql-test/mysql-test-run.sh:
  Fixed some problems with --gdb option
  Test with both the socket and the TCP/IP port that all old servers are killed
mysql-test/r/flush_table.result:
  More tests with lock table with 2 threads + flush table
mysql-test/r/information_schema.result:
  Removed old (now wrong) result
mysql-test/r/innodb.result:
  Better error messages (thanks to TDC patch)
mysql-test/r/merge.result:
  Extra flush table test
mysql-test/r/ndb_bitfield.result:
  Better error messages (thanks to TDC patch)
mysql-test/r/ndb_partition_error.result:
  Better error messages (thanks to TDC patch)
mysql-test/r/query_cache.result:
  Remove tables left from old tests
mysql-test/r/temp_table.result:
  Test truncate with temporary tables
mysql-test/r/variables.result:
  Table_cache -> Table_open_cache
mysql-test/t/flush_table.test:
  More tests with lock table with 2 threads + flush table
mysql-test/t/merge.test:
  Extra flush table test
mysql-test/t/multi_update.test:
  Added 'sleep' to make test predictable
mysql-test/t/query_cache.test:
  Remove tables left from old tests
mysql-test/t/temp_table.test:
  Test truncate with temporary tables
mysql-test/t/variables.test:
  Table_cache -> Table_open_cache
mysql-test/valgrind.supp:
  Remove a warning that may happen because threads die in a different order
mysys/hash.c:
  Fixed wrong DBUG_PRINT
mysys/mf_dirname.c:
  More DBUG
mysys/mf_pack.c:
  Better comment
mysys/mf_tempdir.c:
  More DBUG
  Ensure that we call cleanup_dirname() on all temporary directory paths.

  If we don't do this, we will get a failure when comparing temporary table
  names, as in some cases the temporary table name is run through convert_dirname()
mysys/my_alloc.c:
  Indentation fix
sql/examples/ha_example.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_example.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/field.cc:
  Update for table definition cache:
  - Field creation now takes TABLE_SHARE instead of TABLE as argument
    (This is because field definitions are now cached in TABLE_SHARE)
    When a field is created, one must now call field->init(TABLE) before using it (see the sketch below)
  - Use s->db instead of s->table_cache_key
  - Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
  - make_field() takes TABLE_SHARE as argument instead of TABLE
  - move_field() -> move_field_offset()
sql/field.h:
  Update for table definition cache:
  - Field creation now takes TABLE_SHARE instead of TABLE as argument
    (This is because field definitions are now cached in TABLE_SHARE)
    When a field is created, one must now call field->init(TABLE) before using it
  - Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
  - make_field() takes TABLE_SHARE as argument instead of TABLE
  - move_field() -> move_field_offset()
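
The create-then-init pattern above (create a field against the shared definition, then bind it to a concrete table before use) can be sketched as follows; the names are an illustrative, hypothetical model, not the server's Field class:

#include <stddef.h>

struct share_model;                 /* stands in for TABLE_SHARE */
struct table_model;                 /* stands in for TABLE */

struct field_model
{
  const struct share_model *owner;  /* set when the field is created */
  struct table_model *table;        /* set by init(); required before use */
};

/* Step 1: create the field from the cached definition (TABLE_SHARE) */
static void field_create(struct field_model *f, const struct share_model *s)
{
  f->owner= s;
  f->table= NULL;                   /* not usable yet */
}

/* Step 2: the equivalent of field->init(table): bind to the using TABLE */
static void field_init(struct field_model *f, struct table_model *t)
{
  f->table= t;
}
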
sql/ha_archive.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_archive.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_berkeley.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Changed the name of an argument to create() so that it doesn't hide the internal 'table' variable.
  table->s -> table_share
sql/ha_berkeley.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_federated.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Fixed comments
  Removed an index variable and replaced it with pointers (simple optimization)
  move_field() -> move_field_offset()
  Removed some strlen() calls
sql/ha_federated.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_heap.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Simplified delete_table() and create() as the given file names now come without an extension
sql/ha_heap.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisam.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Removed an unneeded fn_format() call
  Fixed for the new table->s structure
sql/ha_myisam.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisammrg.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Don't set 'is_view' for MERGE tables
  Use the new interface to find_temporary_table()
sql/ha_myisammrg.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Added flag HA_NO_COPY_ON_ALTER
sql/ha_ndbcluster.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Fixed wrong calls to strxnmov()
  Give error HA_ERR_TABLE_DEF_CHANGED if the table definition has changed
  drop_table -> intern_drop_table()
  table->s -> table_share
  Move part_info to TABLE
  Fixed comments & DBUG prints
  New arguments to print_error()
sql/ha_ndbcluster.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_partition.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  We can't set up or use part_info when creating the handler, as there is not yet any table object
  New ha_intialise() to work with TDC (Done by Mikael)
sql/ha_partition.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Got set_part_info() from Mikael
sql/handler.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  ha_delete_table() now also takes the database as an argument
  handler::ha_open() now takes TABLE as argument
  ha_open() now calls ha_allocate_read_write_set()
  Simplify ha_allocate_read_write_set()
  Remove ha_deallocate_read_write_set()
  Use table_share (cached by the table definition cache)
sql/handler.h:
  New table flag: HA_NO_COPY_ON_ALTER (used by MERGE tables)
  Remove ha_deallocate_read_write_set()
  get_new_handler() now takes TABLE_SHARE as argument
  ha_delete_table() now gets the database as argument
sql/item.cc:
  table_name and db are now LEX_STRING objects
  When creating fields, we now have to call field->init(table)
  move_field -> move_field_offset()
sql/item.h:
  tmp_table_field_from_field_type() now takes an extra parameter 'fixed_length' that allows one to force usage of CHAR instead of BLOB
sql/item_cmpfunc.cc:
  Fixed call to tmp_table_field_from_field_type()
sql/item_create.cc:
  Assert if there is a new cast type that is not handled
sql/item_func.cc:
  When creating fields, we now have to call field->init(table)
  dummy_table used by 'sp' now needs a TABLE_SHARE object
sql/item_subselect.cc:
  Trivial code cleanups
sql/item_sum.cc:
  When creating fields, we now have to call field->init(table)
sql/item_timefunc.cc:
  Item_func_str_to_date::tmp_table_field() is now replaced by a call to tmp_table_field_from_field_type() (see item_timefunc.h)
sql/item_timefunc.h:
  Simplified tmp_table_field()
sql/item_uniq.cc:
  When creating fields, we now have to call field->init(table)
sql/key.cc:
  Added 'KEY' argument to 'find_ref_key' to simplify code
sql/lock.cc:
  More debugging
  Use create_table_def_key() to create key for table cache
  Allocate TABLE_SHARE properly when creating name lock
  Fixed so that locked_table_name doesn't test the same table twice
sql/mysql_priv.h:
  New functions for table definition cache
  New interfaces to a lot of functions.
  New faster interface to find_temporary_table() and close_temporary_table()
sql/mysqld.cc:
  Added support for table definition cache of size 'table_def_size'
  Fixed some calls to strnmov()
  Changed name of 'table_cache' to 'table_open_cache'
sql/opt_range.cc:
  Use new interfaces
  Fixed warnings from valgrind
sql/parse_file.cc:
  Safer calls to strxnmov()
  Fixed typo
sql/set_var.cc:
  Added variable 'table_definition_cache'
  Variable table_cache renamed to 'table_open_cache'
sql/slave.cc:
  Use new interface
sql/sp.cc:
  Proper use of TABLE_SHARE
sql/sp_head.cc:
  Remove compiler warnings
  We have now to call field->init(table)
sql/sp_head.h:
  Pointers to parsed strings are now const
sql/sql_acl.cc:
  table_name is now a LEX_STRING
sql/sql_base.cc:
  Main implementation of table definition cache
  (The #ifdefs are there for the future, when the table definition cache will replace the open table cache)
  Now table definitions are cached independently of open tables, which speeds things up when a table is in use from several places at once (a sketch of this lookup pattern follows this entry)
  Views are not yet cached; for the moment we only cache whether a table is a view or not.
  
  Faster implementation of find_temporary_table()
  Replace 'wait_for_refresh()' with the more general function 'wait_for_condition()'
  Drop table is slightly faster as we can use the table definition cache to know the type of the table
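
The lookup-or-create behaviour described here can be pictured with a small self-contained sketch; it is a simplified illustration under assumed names (def_model, get_def), not the server's get_table_share():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_CACHED 256

struct def_model { char key[128]; unsigned ref_count; };

static struct def_model *cache[MAX_CACHED];
static unsigned cached;

/*
  Look up a cached definition by "db/name"; create and cache it on a miss.
  Every open table then points at the single cached definition, so the
  .frm file only has to be read and unpacked once.
*/
static struct def_model *get_def(const char *db, const char *name)
{
  char key[128];
  unsigned i;

  snprintf(key, sizeof(key), "%s/%s", db, name);
  for (i= 0; i < cached; i++)
  {
    if (strcmp(cache[i]->key, key) == 0)
    {
      cache[i]->ref_count++;                /* cache hit: no .frm read */
      return cache[i];
    }
  }
  if (cached == MAX_CACHED)
    return NULL;                            /* a real cache would evict here */
  if (!(cache[cached]= calloc(1, sizeof(struct def_model))))
    return NULL;
  strcpy(cache[cached]->key, key);          /* a real cache unpacks the .frm here */
  cache[cached]->ref_count= 1;
  return cache[cached++];
}

int main(void)
{
  struct def_model *a= get_def("test", "t1");
  struct def_model *b= get_def("test", "t1");   /* second open: cache hit */
  printf("same definition: %s, ref_count=%u\n",
         a == b ? "yes" : "no", a ? a->ref_count : 0);
  return 0;
}
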
sql/sql_cache.cc:
  table_cache_key and table_name are now LEX_STRING
  DBUG print fixes
sql/sql_class.cc:
  table_cache_key is now a LEX_STRING
  safer strxnmov()
sql/sql_class.h:
  Added number of open table shares (table definitions)
sql/sql_db.cc:
  safer strxnmov()
sql/sql_delete.cc:
  Use new interface to find_temporary_table()
sql/sql_derived.cc:
  table_name is now a LEX_STRING
sql/sql_handler.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_insert.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_lex.cc:
  Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_lex.h:
  Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_load.cc:
  Safer strxnmov()
sql/sql_parse.cc:
  Better error if wrong DB name
sql/sql_partition.cc:
  part_info moved to TABLE from TABLE_SHARE
  Indentation changes
sql/sql_select.cc:
  Indentation fixes
  Call field->init(TABLE) for newly created fields
  Update create_tmp_table() to use TABLE_SHARE properly
sql/sql_select.h:
  Call field->init(TABLE) for newly created fields
sql/sql_show.cc:
  table_name is now a LEX_STRING
  part_info moved to TABLE
sql/sql_table.cc:
  Use table definition cache to speed up delete of tables
  Fixed calls to functions with new interfaces
  Don't use 'share_not_to_be_used'
  Instead of doing openfrm() when doing repair, we now have to call
  get_table_share() followed by open_table_from_share().
  Replace some fn_format() with faster unpack_filename().
  Safer strxnmov()
  part_info is now in TABLE
  Added Mikaels patch for partition and ALTER TABLE
  Instead of using 'TABLE_SHARE->is_view' use 'table_flags() & HA_NO_COPY_ON_ALTER'
sql/sql_test.cc:
  table_name and table_cache_key are now LEX_STRING's
sql/sql_trigger.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
  safer strxnmov()
  Removed compiler warnings
sql/sql_update.cc:
  Call field->init(TABLE) after field is created
sql/sql_view.cc:
  safer strxnmov()
  Create common TABLE_SHARE object for views to allow us to cache if table is a view
sql/structs.h:
  Added SHOW_TABLE_DEFINITIONS
sql/table.cc:
  Creation and destruction of TABLE_SHARE objects that are common to many TABLE objects
  
  The table opening process now works the following way:
  - Create common TABLE_SHARE object
  - Read the .frm file and unpack it into the TABLE_SHARE object
  - Create a TABLE object based on the information in the TABLE_SHARE
    object and open a handler to the table object
  
  open_table_def() is written in such a way that it should be trivial to add parsing of .frm files in new formats
sql/table.h:
  TABLE objects for the same database table now share a common TABLE_SHARE object
  In TABLE_SHARE the most common strings are now LEX_STRING's
sql/unireg.cc:
  Changed the arguments to rea_create_table() to have the same order as other functions
  Call field->init(table) for newly created fields
sql/unireg.h:
  Added OPEN_VIEW
strings/strxnmov.c:
  Change strxnmov() to always add a terminating \0 to the result (illustrated below)
  This makes usage of strxnmov() safer, as most of the MySQL code assumes that strxnmov() will create a null-terminated string
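
A simplified model of the changed behaviour is sketched below. This is not the library's strxnmov() itself, just an editorial illustration of the guarantee described above (callers pass sizeof(buffer) - 1 so the added terminator always fits):

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

/*
  Concatenate NUL-terminated sources into dst, copying at most len
  characters, and always write a terminating NUL (the post-patch
  guarantee).  The argument list ends with a NULL pointer.
*/
static char *strxnmov_model(char *dst, size_t len, ...)
{
  va_list args;
  const char *src;
  char *end= dst + len;

  va_start(args, len);
  while ((src= va_arg(args, const char *)) != NULL)
    while (*src && dst < end)
      *dst++= *src++;
  va_end(args);
  *dst= '\0';                     /* always terminate, even on truncation */
  return dst;                     /* points at the terminating NUL */
}

int main(void)
{
  char path[32];
  strxnmov_model(path, sizeof(path) - 1,
                 "/data/", "test", "/", "t1", ".frm", (char*) NULL);
  puts(path);                     /* "/data/test/t1.frm" */
  return 0;
}
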
		
	
		
			
				
	
	
		
mysys/my_alloc.c (419 lines, 12 KiB, C):

/* Copyright (C) 2000 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

/* Routines to handle mallocing of results which will be freed the same time */

#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>
#undef EXTRA_DEBUG
#define EXTRA_DEBUG

/*
  Initialize memory root

  SYNOPSIS
    init_alloc_root()
      mem_root       - memory root to initialize
      block_size     - size of chunks (blocks) used for memory allocation
                       (It is external size of chunk i.e. it should include
                        memory required for internal structures, thus it
                        should be no less than ALLOC_ROOT_MIN_BLOCK_SIZE)
      pre_alloc_size - if non-0, then size of block that should be
                       pre-allocated during memory root initialization.

  DESCRIPTION
    This function prepares memory root for further use, sets initial size of
    chunk for memory allocation and pre-allocates first block if specified.
    Although an error can happen during execution of this function if
    pre_alloc_size is non-0, it won't be reported. Instead it will be
    reported as an error in the first alloc_root() on this memory root.
*/

void init_alloc_root(MEM_ROOT *mem_root, uint block_size,
		     uint pre_alloc_size __attribute__((unused)))
{
  DBUG_ENTER("init_alloc_root");
  DBUG_PRINT("enter",("root: 0x%lx", mem_root));
  mem_root->free= mem_root->used= mem_root->pre_alloc= 0;
  mem_root->min_malloc= 32;
  mem_root->block_size= block_size - ALLOC_ROOT_MIN_BLOCK_SIZE;
  mem_root->error_handler= 0;
  mem_root->block_num= 4;			/* We shift this with >>2 */
  mem_root->first_block_usage= 0;

#if !(defined(HAVE_purify) && defined(EXTRA_DEBUG))
  if (pre_alloc_size)
  {
    if ((mem_root->free= mem_root->pre_alloc=
	 (USED_MEM*) my_malloc(pre_alloc_size+ ALIGN_SIZE(sizeof(USED_MEM)),
			       MYF(0))))
    {
      mem_root->free->size= pre_alloc_size+ALIGN_SIZE(sizeof(USED_MEM));
      mem_root->free->left= pre_alloc_size;
      mem_root->free->next= 0;
    }
  }
#endif
  DBUG_VOID_RETURN;
}

/*
  SYNOPSIS
    reset_root_defaults()
    mem_root        memory root to change defaults of
    block_size      new value of block size. Must be greater than or equal
                    to ALLOC_ROOT_MIN_BLOCK_SIZE (this value is about
                    68 bytes and depends on platform and compilation flags)
    pre_alloc_size  new size of preallocated block. If not zero,
                    must be equal to or greater than block size,
                    otherwise means 'no prealloc'.
  DESCRIPTION
    Function aligns and assigns new value to block size; then it tries to
    reuse one of existing blocks as prealloc block, or malloc new one of
    requested size. If no blocks can be reused, all unused blocks are freed
    before allocation.
*/

void reset_root_defaults(MEM_ROOT *mem_root, uint block_size,
                         uint pre_alloc_size __attribute__((unused)))
{
  DBUG_ASSERT(alloc_root_inited(mem_root));

  mem_root->block_size= block_size - ALLOC_ROOT_MIN_BLOCK_SIZE;
#if !(defined(HAVE_purify) && defined(EXTRA_DEBUG))
  if (pre_alloc_size)
  {
    uint size= pre_alloc_size + ALIGN_SIZE(sizeof(USED_MEM));
    if (!mem_root->pre_alloc || mem_root->pre_alloc->size != size)
    {
      USED_MEM *mem, **prev= &mem_root->free;
      /*
        Free unused blocks, so that consequent calls
        to reset_root_defaults won't eat away memory.
      */
      while (*prev)
      {
        mem= *prev;
        if (mem->size == size)
        {
          /* We found a suitable block, no need to do anything else */
          mem_root->pre_alloc= mem;
          return;
        }
        if (mem->left + ALIGN_SIZE(sizeof(USED_MEM)) == mem->size)
        {
          /* remove block from the list and free it */
          *prev= mem->next;
          my_free((gptr) mem, MYF(0));
        }
        else
          prev= &mem->next;
      }
      /* Allocate new prealloc block and add it to the end of free list */
      if ((mem= (USED_MEM *) my_malloc(size, MYF(0))))
      {
        mem->size= size;
        mem->left= pre_alloc_size;
        mem->next= *prev;
        *prev= mem_root->pre_alloc= mem;
      }
    }
  }
  else
#endif
    mem_root->pre_alloc= 0;
}

gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size)
{
#if defined(HAVE_purify) && defined(EXTRA_DEBUG)
  reg1 USED_MEM *next;
  DBUG_ENTER("alloc_root");
  DBUG_PRINT("enter",("root: 0x%lx", mem_root));

  DBUG_ASSERT(alloc_root_inited(mem_root));

  Size+=ALIGN_SIZE(sizeof(USED_MEM));
  if (!(next = (USED_MEM*) my_malloc(Size,MYF(MY_WME))))
  {
    if (mem_root->error_handler)
      (*mem_root->error_handler)();
    DBUG_RETURN((gptr) 0);			/* purecov: inspected */
  }
  next->next= mem_root->used;
  next->size= Size;
  mem_root->used= next;
  DBUG_PRINT("exit",("ptr: 0x%lx", (((char*) next)+
                                    ALIGN_SIZE(sizeof(USED_MEM)))));
  DBUG_RETURN((gptr) (((char*) next)+ALIGN_SIZE(sizeof(USED_MEM))));
#else
  uint get_size, block_size;
  gptr point;
  reg1 USED_MEM *next= 0;
  reg2 USED_MEM **prev;
  DBUG_ENTER("alloc_root");
  DBUG_PRINT("enter",("root: 0x%lx", mem_root));
  DBUG_ASSERT(alloc_root_inited(mem_root));

  Size= ALIGN_SIZE(Size);
  if ((*(prev= &mem_root->free)) != NULL)
  {
    if ((*prev)->left < Size &&
	mem_root->first_block_usage++ >= ALLOC_MAX_BLOCK_USAGE_BEFORE_DROP &&
	(*prev)->left < ALLOC_MAX_BLOCK_TO_DROP)
    {
      next= *prev;
      *prev= next->next;			/* Remove block from list */
      next->next= mem_root->used;
      mem_root->used= next;
      mem_root->first_block_usage= 0;
    }
    for (next= *prev ; next && next->left < Size ; next= next->next)
      prev= &next->next;
  }
  if (! next)
  {						/* Time to alloc new block */
    block_size= mem_root->block_size * (mem_root->block_num >> 2);
    get_size= Size+ALIGN_SIZE(sizeof(USED_MEM));
    get_size= max(get_size, block_size);

    if (!(next = (USED_MEM*) my_malloc(get_size,MYF(MY_WME))))
    {
      if (mem_root->error_handler)
	(*mem_root->error_handler)();
      return((gptr) 0);				/* purecov: inspected */
    }
    mem_root->block_num++;
    next->next= *prev;
    next->size= get_size;
    next->left= get_size-ALIGN_SIZE(sizeof(USED_MEM));
    *prev=next;
  }

  point= (gptr) ((char*) next+ (next->size-next->left));
  /* TODO: next part may be unneeded due to mem_root->first_block_usage counter */
  if ((next->left-= Size) < mem_root->min_malloc)
  {						/* Full block */
    *prev= next->next;				/* Remove block from list */
    next->next= mem_root->used;
    mem_root->used= next;
    mem_root->first_block_usage= 0;
  }
  DBUG_PRINT("exit",("ptr: 0x%lx", (ulong) point));
  DBUG_RETURN(point);
#endif
}

/*
  Allocate many pointers at the same time.

  DESCRIPTION
    ptr1, ptr2, etc all point into big allocated memory area.

  SYNOPSIS
    multi_alloc_root()
      root               Memory root
      ptr1, length1      Multiple arguments terminated by a NULL pointer
      ptr2, length2      ...
      ...
      NULL

  RETURN VALUE
    A pointer to the beginning of the allocated memory block
    in case of success or NULL if out of memory.
*/

gptr multi_alloc_root(MEM_ROOT *root, ...)
{
  va_list args;
  char **ptr, *start, *res;
  uint tot_length, length;
  DBUG_ENTER("multi_alloc_root");

  va_start(args, root);
  tot_length= 0;
  while ((ptr= va_arg(args, char **)))
  {
    length= va_arg(args, uint);
    tot_length+= ALIGN_SIZE(length);
  }
  va_end(args);

  if (!(start= (char*) alloc_root(root, tot_length)))
    DBUG_RETURN(0);                            /* purecov: inspected */

  va_start(args, root);
  res= start;
  while ((ptr= va_arg(args, char **)))
  {
    *ptr= res;
    length= va_arg(args, uint);
    res+= ALIGN_SIZE(length);
  }
  va_end(args);
  DBUG_RETURN((gptr) start);
}

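/*
  Usage sketch for multi_alloc_root() (added as an editorial illustration,
  not part of the original file).  The variable argument list is
  (pointer, length) pairs terminated by a NULL pointer, and all returned
  pointers point into one block obtained from alloc_root(), e.g.:

    char *name, *path;
    if (multi_alloc_root(root,
                         &name, name_length + 1,
                         &path, path_length + 1,
                         NullS))
    {
      ... use name and path; both are freed together with the root ...
    }
*/
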
#define TRASH_MEM(X) TRASH(((char*)(X) + ((X)->size-(X)->left)), (X)->left)

/* Mark all data in blocks free for reusage */

static inline void mark_blocks_free(MEM_ROOT* root)
{
  reg1 USED_MEM *next;
  reg2 USED_MEM **last;

  /* iterate through (partially) free blocks, mark them free */
  last= &root->free;
  for (next= root->free; next; next= *(last= &next->next))
  {
    next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(next);
  }

  /* Combine the free and the used list */
  *last= next=root->used;

  /* now go through the used blocks and mark them free */
  for (; next; next= next->next)
  {
    next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(next);
  }

  /* Now everything is set; Indicate that nothing is used anymore */
  root->used= 0;
  root->first_block_usage= 0;
}

/*
  Deallocate everything used by alloc_root or just move
  used blocks to free list if called with MY_MARK_BLOCKS_FREE

  SYNOPSIS
    free_root()
      root		Memory root
      MyFlags		Flags for what should be freed:

        MY_MARK_BLOCKS_FREE	Don't free blocks, just mark them free
        MY_KEEP_PREALLOC	If this is not set, then free also the
        		        preallocated block

  NOTES
    One can call this function either with root block initialised with
    init_alloc_root() or with a bzero()-ed block.
    It's also safe to call this multiple times with the same mem_root.
*/

void free_root(MEM_ROOT *root, myf MyFlags)
{
  reg1 USED_MEM *next,*old;
  DBUG_ENTER("free_root");
  DBUG_PRINT("enter",("root: 0x%lx  flags: %u", root, (uint) MyFlags));

  if (!root)					/* QQ: Should be deleted */
    DBUG_VOID_RETURN; /* purecov: inspected */
  if (MyFlags & MY_MARK_BLOCKS_FREE)
  {
    mark_blocks_free(root);
    DBUG_VOID_RETURN;
  }
  if (!(MyFlags & MY_KEEP_PREALLOC))
    root->pre_alloc=0;

  for (next=root->used; next ;)
  {
    old=next; next= next->next ;
    if (old != root->pre_alloc)
      my_free((gptr) old,MYF(0));
  }
  for (next=root->free ; next ;)
  {
    old=next; next= next->next;
    if (old != root->pre_alloc)
      my_free((gptr) old,MYF(0));
  }
  root->used=root->free=0;
  if (root->pre_alloc)
  {
    root->free=root->pre_alloc;
    root->free->left=root->pre_alloc->size-ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(root->pre_alloc);
    root->free->next=0;
  }
  root->block_num= 4;
  root->first_block_usage= 0;
  DBUG_VOID_RETURN;
}

/*
  Find block that contains an object and set the pre_alloc to it
*/

void set_prealloc_root(MEM_ROOT *root, char *ptr)
{
  USED_MEM *next;
  for (next=root->used; next ; next=next->next)
  {
    if ((char*) next <= ptr && (char*) next + next->size > ptr)
    {
      root->pre_alloc=next;
      return;
    }
  }
  for (next=root->free ; next ; next=next->next)
  {
    if ((char*) next <= ptr && (char*) next + next->size > ptr)
    {
      root->pre_alloc=next;
      return;
    }
  }
}

char *strdup_root(MEM_ROOT *root,const char *str)
{
  return strmake_root(root, str, (uint) strlen(str));
}


char *strmake_root(MEM_ROOT *root,const char *str, uint len)
{
  char *pos;
  if ((pos=alloc_root(root,len+1)))
  {
    memcpy(pos,str,len);
    pos[len]=0;
  }
  return pos;
}


char *memdup_root(MEM_ROOT *root,const char *str,uint len)
{
  char *pos;
  if ((pos=alloc_root(root,len)))
    memcpy(pos,str,len);
  return pos;
}
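
To tie the commit message back to this file, a minimal usage sketch of the MEM_ROOT API above follows. It assumes a normal mysys build environment (my_global.h, my_sys.h, my_init()/my_end()) and is an editorial illustration, not part of the original file:

/* Editorial usage sketch (not part of the original file). */
#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>

int main(void)
{
  MEM_ROOT root;
  char *copy;

  my_init();                                /* initialize mysys */
  init_alloc_root(&root, 1024, 0);          /* 1K blocks, no pre-allocation */

  copy= strdup_root(&root, "table definition cache");
  (void) alloc_root(&root, 512);            /* more memory from the same root */

  /* Mark all blocks reusable without returning memory to the OS ... */
  free_root(&root, MYF(MY_MARK_BLOCKS_FREE));
  /* ... or free everything (a pre-allocated block, if any, is kept
     only when MY_KEEP_PREALLOC is given) */
  free_root(&root, MYF(0));

  (void) copy;
  my_end(0);
  return 0;
}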