Mirror of https://github.com/MariaDB/server.git, synced 2025-07-30 16:24:05 +03:00
Not yet fully working. Scan reads work fine, not scan updates.

ndb/include/kernel/ndb_limits.h:
  Introducing a new parameter plus increasing the max no of parallel operations per scan in LQH, first step in WL 2025
ndb/include/kernel/signaldata/ScanFrag.hpp:
  Only need one clientOpPtr. Concurrency is the batch_size to use in this scan, batch_byte_size is the max no of bytes sent in a batch, first_batch_size is the batch size used in the first batch
ndb/include/kernel/signaldata/ScanTab.hpp:
  apiOperationPtr is sent as long signal data. batch_byte_size and first_batch_size are needed for further transport to LQH. Batch size can now be bigger than before
ndb/include/kernel/signaldata/TcKeyReq.hpp:
  More concurrency means more size for scanInfo also in TCKEYREQ
ndb/include/ndbapi/NdbReceiver.hpp:
  New subroutine to calculate batch size and similar parameters
ndb/include/ndbapi/NdbScanOperation.hpp:
  Batch size is calculated before sending, so it no longer needs to be stored
ndb/src/common/debugger/signaldata/ScanTab.cpp:
  Updated signal printer for SCAN_TABREQ
ndb/src/kernel/blocks/backup/Backup.cpp:
  Fixes to make it compile, not fixed for BACKUP being useful yet
ndb/src/kernel/blocks/dblqh/Dblqh.hpp:
  Removed parameters no longer needed and added some new ones
ndb/src/kernel/blocks/dblqh/DblqhMain.cpp:
  Fix for cmaxAccOps that was using the wrong constant. Removed old code. New SCAN_FRAGREQ signal
ndb/src/kernel/blocks/dbtc/Dbtc.hpp:
  New variables. Removed dead code
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp:
  New SCAN_TABREQ, SCAN_FRAGREQ, SCAN_FRAGCONF and SCAN_TABCONF. Fixed some error handling to be more efficient
ndb/src/kernel/blocks/suma/Suma.cpp:
  Fixes to make it compile, not yet usable for SUMA features
ndb/src/kernel/vm/Configuration.cpp:
  Fix for wrong constant
ndb/src/ndbapi/NdbApiSignal.cpp:
  Fix for not using constants
ndb/src/ndbapi/NdbApiSignal.hpp:
  Added possibility to get the sending node from a signal
ndb/src/ndbapi/NdbConnectionScan.cpp:
  Moved declaration
ndb/src/ndbapi/NdbReceiver.cpp:
  New routine to calculate batch_size etc.
ndb/src/ndbapi/NdbScanOperation.cpp:
  Various fixes for sending SCAN_TABREQ and other stuff
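To make the new batching parameters concrete, here is a minimal sketch; the struct name and layout are hypothetical stand-ins, not the actual ScanFrag/ScanTab signal classes, and only illustrate the three values the commit threads from the NDB API down to LQH:

    /* Hypothetical illustration only -- not the real signal layout. */
    struct ExampleScanBatchParams {
      unsigned int batch_size;        /* max rows LQH returns per batch for this scan */
      unsigned int batch_byte_size;   /* max bytes LQH may send in one batch          */
      unsigned int first_batch_size;  /* row limit applied to the very first batch    */
    };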
115 lines · 3.4 KiB · C
/* Copyright (C) 2003 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#ifndef NDB_LIMITS_H
#define NDB_LIMITS_H

#define RNIL 0xffffff00

/**
 * Note that actual value = MAX_NODES - 1,
 * since NodeId = 0 can not be used
 */
#define MAX_NDB_NODES 49
#define MAX_NODES 64

/**
 * MAX_API_NODES = MAX_NODES - No of NDB Nodes in use
 */

/**
 * The maximum number of replicas in the system
 */
#define MAX_REPLICAS 4

/**
 * The maximum number of local checkpoints stored at a time
 */
#define MAX_LCP_STORED 3

/**
 * The maximum number of log execution rounds at system restart
 */
#define MAX_LOG_EXEC 4

/**
 * The maximum number of tuples per page
 **/
#define MAX_TUPLES_PER_PAGE 8191
#define MAX_TUPLES_BITS 13 /* 13 bits = 8191 tuples per page */
//#define MAX_NO_OF_TUPLEKEY 16 Not currently used
#define MAX_TABLES 1600
#define MAX_TAB_NAME_SIZE 128
#define MAX_ATTR_NAME_SIZE 32
#define MAX_ATTR_DEFAULT_VALUE_SIZE 128
#define MAX_ATTRIBUTES_IN_TABLE 128
#define MAX_ATTRIBUTES_IN_INDEX 32
#define MAX_TUPLE_SIZE_IN_WORDS 2013
#define MAX_FIXED_KEY_LENGTH_IN_WORDS 8
#define MAX_KEY_SIZE_IN_WORDS 1023
#define MAX_FRM_DATA_SIZE 6000

#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*
 * Number of Records to fetch per SCAN_NEXTREQ in a scan in LQH. The
 * API can order a multiple of this number of records at a time since
 * fragments can be scanned in parallel.
 */
#define MAX_PARALLEL_OP_PER_SCAN 64
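/*
 * Illustrative example, not part of the original header: with the constant
 * above, an API scanning `frags` fragments in parallel can have up to
 * frags * MAX_PARALLEL_OP_PER_SCAN records outstanding at a time,
 * e.g. 4 fragments * 64 = 256 records.
 */
#define EXAMPLE_MAX_OUTSTANDING_RECORDS(frags) \
  ((frags) * MAX_PARALLEL_OP_PER_SCAN)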
/*
 * When calculating the number of records sent from LQH in each batch,
 * one uses SCAN_BATCH_SIZE divided by the expected size of signals
 * per row. This gives the batch size used for the scan. The NDB API
 * receives one batch from each node at a time, so some care must also
 * be taken that the NDB API is not overloaded with signals.
 */
#define SCAN_BATCH_SIZE 32768
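/*
 * Illustrative example (an assumption, not the actual NdbReceiver
 * calculation): derive a per-fragment batch size in rows from the byte
 * budget above and an expected row size (which must be > 0), capped by
 * MAX_PARALLEL_OP_PER_SCAN. E.g. 200-byte rows give 32768 / 200 = 163,
 * capped to 64 rows per batch.
 */
#define EXAMPLE_BATCH_SIZE(expected_bytes_per_row)                        \
  (SCAN_BATCH_SIZE / (expected_bytes_per_row) < MAX_PARALLEL_OP_PER_SCAN  \
     ? SCAN_BATCH_SIZE / (expected_bytes_per_row)                         \
     : MAX_PARALLEL_OP_PER_SCAN)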
/*
 * To protect the NDB API from overload we also define a maximum total
 * batch size from all nodes. This parameter should most likely be
 * configurable, or dependent on sendBufferSize.
 */
#define MAX_SCAN_BATCH_SIZE 196608
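/*
 * Illustrative example (an assumption, not the API's actual logic): when
 * many fragments are scanned in parallel, the per-fragment byte budget can
 * be scaled down so the total outstanding data stays within
 * MAX_SCAN_BATCH_SIZE. With the defaults above, scanning more than
 * 196608 / 32768 = 6 fragments in parallel triggers the scaling.
 */
#define EXAMPLE_BATCH_BYTE_SIZE(parallel_fragments)                  \
  ((parallel_fragments) * SCAN_BATCH_SIZE > MAX_SCAN_BATCH_SIZE      \
     ? MAX_SCAN_BATCH_SIZE / (parallel_fragments)                    \
     : SCAN_BATCH_SIZE)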
/*
 * Maximum number of Parallel Scan queries on one hash index fragment
 */
#define MAX_PARALLEL_SCANS_PER_FRAG 12
/*
 * Maximum parallel ordered index scans per primary table fragment.
 * Implementation limit is (256 minus 12).
 */
#define MAX_PARALLEL_INDEX_SCANS_PER_FRAG 32

/**
 * Computed defines
 */
#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32)
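/*
 * Illustrative example, not part of the original header: the computed define
 * above sizes a per-table attribute bitmask; with MAX_ATTRIBUTES_IN_TABLE =
 * 128 it is 128 / 32 = 4 words of 32 bits. Setting the bit for attribute
 * `attrId` could look like this, where `maskWords` is assumed to be an array
 * of MAXNROFATTRIBUTESINWORDS 32-bit words.
 */
#define EXAMPLE_SET_ATTRIBUTE_BIT(maskWords, attrId) \
  ((maskWords)[(attrId) >> 5] |= (1u << ((attrId) & 31)))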
/*
 * Ordered index constants. Make configurable per index later.
 */
#define MAX_TTREE_NODE_SIZE 64 // total words in node
#define MAX_TTREE_PREF_SIZE 4 // words in min prefix
#define MAX_TTREE_NODE_SLACK 3 // diff between max and min occupancy

/*
 * Blobs.
 */
#define NDB_BLOB_HEAD_SIZE 2 // sizeof(NdbBlob::Head) >> 2

#endif