1
0
mirror of https://github.com/MariaDB/server.git synced 2025-07-30 16:24:05 +03:00

MDEV-17399 JSON_TABLE.

Patch to get rid of duplicated code of the Create_tmp_table.
This commit is contained in:
Alexey Botchkov
2021-04-12 13:04:01 +04:00
parent a5b454f98a
commit f82947e48d
6 changed files with 178 additions and 368 deletions

View File

@ -23,6 +23,7 @@
#include "json_table.h"
#include "sql_show.h"
#include "sql_select.h"
#include "create_tmp_table.h"
#define HA_ERR_JSON_TABLE (HA_ERR_LAST+1)
@ -223,28 +224,21 @@ public:
represents the table function in the query.
*/
class Create_json_table: public Data_type_statistics
class Create_json_table final: public Create_tmp_table
{
// The following members are initialized only in start()
Field **m_default_field;
uchar *m_bitmaps;
// The following members are initialized in ctor
uint m_temp_pool_slot;
uint m_null_count;
public:
Create_json_table(const TMP_TABLE_PARAM *param,
bool save_sum_fields)
:m_temp_pool_slot(MY_BIT_NONE),
m_null_count(0)
{ }
void add_field(TABLE *table, Field *field, uint fieldnr);
Create_json_table() :
Create_tmp_table((ORDER*) 0, 0, 0, 0, 0)
{}
virtual ~Create_json_table() {};
TABLE *start(THD *thd,
TMP_TABLE_PARAM *param,
Table_function_json_table *jt,
const LEX_CSTRING *table_alias);
bool choose_engine(THD *thd, TABLE *table, TMP_TABLE_PARAM *param) override
{
return 0; // Engine already choosen
}
bool add_json_table_fields(THD *thd, TABLE *table,
Table_function_json_table *jt);
bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
@ -618,27 +612,6 @@ int ha_json_table::info(uint)
}
void Create_json_table::add_field(TABLE *table, Field *field, uint fieldnr)
{
DBUG_ASSERT(!field->field_name.str ||
strlen(field->field_name.str) == field->field_name.length);
if (!(field->flags & NOT_NULL_FLAG))
m_null_count++;
table->s->reclength+= field->pack_length();
// Assign it here, before update_data_type_statistics() changes m_blob_count
if (field->flags & BLOB_FLAG)
table->s->blob_field[m_blob_count]= fieldnr;
table->field[fieldnr]= field;
field->field_index= fieldnr;
field->update_data_type_statistics(this);
}
/**
Create a json table according to a field list.
@ -653,98 +626,19 @@ TABLE *Create_json_table::start(THD *thd,
Table_function_json_table *jt,
const LEX_CSTRING *table_alias)
{
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
TABLE_SHARE *share;
uint copy_func_count= param->func_count;
char *tmpname,path[FN_REFLEN];
Field **reg_field;
uint *blob_field;
DBUG_ENTER("Create_json_table::start");
DBUG_PRINT("enter",
("table_alias: '%s' ", table_alias->str));
if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
m_temp_pool_slot = bitmap_lock_set_next(&temp_pool);
if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot
sprintf(path, "%s-%lx-%i", tmp_file_prefix,
current_pid, m_temp_pool_slot);
else
{
/* if we run out of slots or we are not using tempool */
sprintf(path, "%s-%lx-%lx-%x", tmp_file_prefix,current_pid,
(ulong) thd->thread_id, thd->tmp_table++);
}
/*
No need to change table name to lower case.
*/
fn_format(path, path, mysql_tmpdir, "",
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
const uint field_count= param->field_count;
DBUG_ASSERT(field_count);
init_sql_alloc(key_memory_TABLE, &own_root,
TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
&share, sizeof(*share),
&reg_field, sizeof(Field*) * (field_count+1),
&m_default_field, sizeof(Field*) * (field_count),
&blob_field, sizeof(uint)*(field_count+1),
&param->items_to_copy,
sizeof(param->items_to_copy[0])*(copy_func_count+1),
&param->keyinfo, sizeof(*param->keyinfo),
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
&tmpname, (uint) strlen(path)+1,
&m_bitmaps, bitmap_buffer_size(field_count)*6,
NullS))
{
DBUG_RETURN(NULL); /* purecov: inspected */
}
strmov(tmpname, path);
/* make table according to fields */
bzero((char*) table,sizeof(*table));
bzero((char*) reg_field, sizeof(Field*) * (field_count+1));
bzero((char*) m_default_field, sizeof(Field*) * (field_count));
table->mem_root= own_root;
mem_root_save= thd->mem_root;
thd->mem_root= &table->mem_root;
table->field=reg_field;
table->alias.set(table_alias->str, table_alias->length, table_alias_charset);
table->reginfo.lock_type=TL_WRITE; /* Will be updated */
table->map=1;
table->temp_pool_slot= m_temp_pool_slot;
table->copy_blobs= 1;
table->in_use= thd;
table->no_rows_with_nulls= param->force_not_null_cols;
table->s= share;
init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
share->blob_field= blob_field;
share->table_charset= param->table_charset;
share->primary_key= MAX_KEY; // Indicate no primary key
param->tmp_name= "json";
if (!(table= Create_tmp_table::start(thd, param, table_alias)))
DBUG_RETURN(0);
share= table->s;
share->not_usable_by_query_cache= FALSE;
if (param->schema_table)
share->db= INFORMATION_SCHEMA_NAME;
param->using_outer_summary_function= 0;
share->db_plugin= NULL;
if (!(table->file= new (&table->mem_root) ha_json_table(share, jt)))
DBUG_RETURN(NULL);
table->file->init();
thd->mem_root= mem_root_save;
DBUG_RETURN(table);
}
@ -756,143 +650,18 @@ bool Create_json_table::finalize(THD *thd, TABLE *table,
DBUG_ENTER("Create_json_table::finalize");
DBUG_ASSERT(table);
uint null_pack_length;
bool use_packed_rows= false;
uchar *pos;
uchar *null_flags;
TMP_ENGINE_COLUMNDEF *recinfo;
TABLE_SHARE *share= table->s;
MEM_ROOT *mem_root_save= thd->mem_root;
thd->mem_root= &table->mem_root;
DBUG_ASSERT(param->field_count >= share->fields);
DBUG_ASSERT(param->field_count >= share->blob_fields);
if (table->file->set_ha_share_ref(&share->ha_share))
{
delete table->file;
goto err;
}
if (share->blob_fields == 0)
m_null_count++;
null_pack_length= (m_null_count + m_uneven_bit_length + 7) / 8;
share->reclength+= null_pack_length;
if (!share->reclength)
share->reclength= 1; // Dummy select
{
uint alloc_length= ALIGN_SIZE(share->reclength + MI_UNIQUE_HASH_LENGTH+1);
share->rec_buff_length= alloc_length;
if (!(table->record[0]= (uchar*)
alloc_root(&table->mem_root, alloc_length*3)))
goto err;
table->record[1]= table->record[0]+alloc_length;
share->default_values= table->record[1]+alloc_length;
}
setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields);
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
pos=table->record[0]+ null_pack_length;
if (null_pack_length)
{
bzero((uchar*) recinfo,sizeof(*recinfo));
recinfo->type=FIELD_NORMAL;
recinfo->length=null_pack_length;
recinfo++;
bfill(null_flags,null_pack_length,255); // Set null fields
table->null_flags= (uchar*) table->record[0];
share->null_fields= m_null_count;
share->null_bytes= share->null_bytes_for_compare= null_pack_length;
}
m_null_count= (share->blob_fields == 0) ? 1 : 0;
for (uint i= 0; i < share->fields; i++, recinfo++)
{
Field *field= table->field[i];
uint length;
bzero((uchar*) recinfo,sizeof(*recinfo));
if (!(field->flags & NOT_NULL_FLAG))
{
recinfo->null_bit= (uint8)1 << (m_null_count & 7);
recinfo->null_pos= m_null_count/8;
field->move_field(pos, null_flags + m_null_count/8,
(uint8)1 << (m_null_count & 7));
m_null_count++;
}
else
field->move_field(pos,(uchar*) 0,0);
if (field->type() == MYSQL_TYPE_BIT)
{
/* We have to reserve place for extra bits among null bits */
((Field_bit*) field)->set_bit_ptr(null_flags + m_null_count / 8,
m_null_count & 7);
m_null_count+= (field->field_length & 7);
}
field->reset();
/*
Test if there is a default field value. The test for ->ptr is to skip
'offset' fields generated by initialize_tables
*/
if (m_default_field[i] && m_default_field[i]->ptr)
{
/*
default_field[i] is set only in the cases when 'field' can
inherit the default value that is defined for the field referred
by the Item_field object from which 'field' has been created.
*/
const Field *orig_field= m_default_field[i];
/* Get the value from default_values */
if (orig_field->is_null_in_record(orig_field->table->s->default_values))
field->set_null();
else
{
field->set_notnull();
memcpy(field->ptr,
orig_field->ptr_in_record(orig_field->table->s->default_values),
field->pack_length_in_rec());
}
}
length=field->pack_length();
pos+= length;
/* Make entry for create table */
recinfo->length=length;
recinfo->type= field->tmp_engine_column_type(use_packed_rows);
// fix table name in field entry
field->set_table_name(&table->alias);
}
param->recinfo= recinfo; // Pointer to after last field
store_record(table,s->default_values); // Make empty default record
share->max_rows= ~(ha_rows) 0;
param->end_write_records= HA_POS_ERROR;
share->db_record_offset= 1;
if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR,
HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
goto err;
if (Create_tmp_table::finalize(thd, table, param, 1, 0))
DBUG_RETURN(true);
table->db_stat= HA_OPEN_KEYFILE;
if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR,
HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
DBUG_RETURN(true);
table->set_created();
thd->mem_root= mem_root_save;
DBUG_RETURN(false);
err:
thd->mem_root= mem_root_save;
DBUG_RETURN(true);
table->s->max_rows= ~(ha_rows) 0;
param->end_write_records= HA_POS_ERROR;
DBUG_RETURN(0);
}
@ -910,11 +679,11 @@ bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table,
uint fieldnr= 0;
MEM_ROOT *mem_root_save= thd->mem_root;
List_iterator_fast<Json_table_column> jc_i(jt->m_columns);
DBUG_ENTER("add_json_table_fields");
thd->mem_root= &table->mem_root;
current_counter= other;
while ((jc= jc_i++))
{
Create_field *sql_f= jc->m_field;
@ -963,7 +732,7 @@ bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table,
if (!f)
goto err_exit;
f->init(table);
add_field(table, f, fieldnr++);
add_field(table, f, fieldnr++, 0);
}
share->fields= fieldnr;
@ -1009,7 +778,7 @@ TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table)
tp.table_charset= system_charset_info;
tp.field_count= field_count;
{
Create_json_table maker(&tp, false);
Create_json_table maker;
if (!(table= maker.start(thd, &tp,
sql_table->table_function, &sql_table->alias)) ||