ha_ndbcluster Class Reference
Public Types
typedef PARTITION_INFO | PARTITION_STATS |
Public Member Functions
ha_ndbcluster (handlerton *hton, TABLE_SHARE *table) | |
int | open (const char *name, int mode, uint test_if_locked) |
int | close (void) |
void | local_close (THD *thd, bool release_metadata) |
int | optimize (THD *thd, HA_CHECK_OPT *check_opt) |
int | analyze (THD *thd, HA_CHECK_OPT *check_opt) |
int | analyze_index (THD *thd) |
int | write_row (uchar *buf) |
int | update_row (const uchar *old_data, uchar *new_data) |
int | delete_row (const uchar *buf) |
int | index_init (uint index, bool sorted) |
int | index_end () |
int | index_read_idx_map (uchar *buf, uint index, const uchar *key, key_part_map keypart_map, enum ha_rkey_function find_flag) |
Positions an index cursor to the index specified in argument. Fetches the row if available. If the key value is null, begin at the first key of the index.
int | index_read (uchar *buf, const uchar *key, uint key_len, enum ha_rkey_function find_flag) |
int | index_next (uchar *buf) |
int | index_prev (uchar *buf) |
int | index_first (uchar *buf) |
int | index_last (uchar *buf) |
int | index_read_last (uchar *buf, const uchar *key, uint key_len) |
int | rnd_init (bool scan) |
int | rnd_end () |
int | rnd_next (uchar *buf) |
int | rnd_pos (uchar *buf, uchar *pos) |
void | position (const uchar *record) |
int | read_first_row (uchar *buf, uint primary_key) |
virtual int | cmp_ref (const uchar *ref1, const uchar *ref2) |
int | read_range_first (const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted) |
Read first row between two ranges. Store ranges for future calls to read_range_next.
int | read_range_first_to_buf (const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted, uchar *buf) |
int | read_range_next () |
Read next row between two endpoints.
int | read_multi_range_first (KEY_MULTI_RANGE **found_range_p, KEY_MULTI_RANGE *ranges, uint range_count, bool sorted, HANDLER_BUFFER *buffer) |
int | read_multi_range_next (KEY_MULTI_RANGE **found_range_p) |
bool | null_value_index_search (KEY_MULTI_RANGE *ranges, KEY_MULTI_RANGE *end_range, HANDLER_BUFFER *buffer) |
bool | get_error_message (int error, String *buf) |
ha_rows | records () |
ha_rows | estimate_rows_upper_bound () |
int | info (uint) |
void | get_dynamic_partition_info (PARTITION_STATS *stat_info, uint part_id) |
uint32 | calculate_key_hash_value (Field **field_array) |
bool | start_read_removal (void) |
ha_rows | end_read_removal (void) |
int | extra (enum ha_extra_function operation) |
int | extra_opt (enum ha_extra_function operation, ulong cache_size) |
int | reset () |
int | external_lock (THD *thd, int lock_type) |
void | unlock_row () |
int | start_stmt (THD *thd, thr_lock_type lock_type) |
void | update_create_info (HA_CREATE_INFO *create_info) |
void | print_error (int error, myf errflag) |
const char * | table_type () const |
const char ** | bas_ext () const |
ulonglong | table_flags (void) const |
void | set_part_info (partition_info *part_info, bool early) |
ulong | index_flags (uint idx, uint part, bool all_parts) const |
virtual const key_map * | keys_to_use_for_scanning () |
bool | primary_key_is_clustered () |
uint | max_supported_record_length () const |
uint | max_supported_keys () const |
uint | max_supported_key_parts () const |
uint | max_supported_key_length () const |
uint | max_supported_key_part_length () const |
int | rename_table (const char *from, const char *to) |
int | delete_table (const char *name) |
int | create (const char *name, TABLE *form, HA_CREATE_INFO *info) |
int | get_default_no_partitions (HA_CREATE_INFO *info) |
bool | get_no_parts (const char *name, uint *no_parts) |
void | set_auto_partitions (partition_info *part_info) |
virtual bool | is_fatal_error (int error, uint flags) |
THR_LOCK_DATA ** | store_lock (THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) |
bool | low_byte_first () const |
const char * | index_type (uint key_number) |
double | scan_time () |
ha_rows | records_in_range (uint inx, key_range *min_key, key_range *max_key) |
void | start_bulk_insert (ha_rows rows) |
int | end_bulk_insert () |
bool | start_bulk_update () |
int | bulk_update_row (const uchar *old_data, uchar *new_data, uint *dup_key_found) |
int | exec_bulk_update (uint *dup_key_found) |
void | end_bulk_update () |
int | ndb_update_row (const uchar *old_data, uchar *new_data, int is_bulk_update) |
const Item * | cond_push (const Item *cond) |
void | cond_pop () |
bool | maybe_pushable_join (const char *&reason) const |
int | assign_pushed_join (const ndb_pushed_join *pushed_join) |
uint | number_of_pushed_joins () const |
const TABLE * | root_of_pushed_join () const |
const TABLE * | parent_of_pushed_join () const |
int | index_read_pushed (uchar *buf, const uchar *key, key_part_map keypart_map) |
int | index_next_pushed (uchar *buf) |
uint8 | table_cache_type () |
int | ndb_err (NdbTransaction *, bool have_lock=FALSE) |
my_bool | register_query_cache_table (THD *thd, char *table_key, uint key_length, qc_engine_callback *engine_callback, ulonglong *engine_data) |
Register a named table with a call back function to the query cache.
int | check_if_supported_alter (TABLE *altered_table, HA_CREATE_INFO *create_info, Alter_info *alter_info, HA_ALTER_FLAGS *alter_flags, uint table_changes) |
int | alter_table_phase1 (THD *thd, TABLE *altered_table, HA_CREATE_INFO *create_info, HA_ALTER_INFO *alter_info, HA_ALTER_FLAGS *alter_flags) |
int | alter_table_phase2 (THD *thd, TABLE *altered_table, HA_CREATE_INFO *create_info, HA_ALTER_INFO *alter_info, HA_ALTER_FLAGS *alter_flags) |
int | alter_table_phase3 (THD *thd, TABLE *table, HA_CREATE_INFO *create_info, HA_ALTER_INFO *alter_info, HA_ALTER_FLAGS *alter_flags) |
Static Public Member Functions
static void | set_dbname (const char *pathname, char *dbname) |
static void | set_tabname (const char *pathname, char *tabname) |
static void | release_completed_operations (NdbTransaction *) |
Friends
class | ndb_pushed_builder_ctx |
int | ndbcluster_drop_database_impl (THD *thd, const char *path) |
int | ndb_handle_schema_change (THD *thd, Ndb *ndb, NdbEventOperation *pOp, NDB_SHARE *share) |
int | g_get_ndb_blobs_value (NdbBlob *ndb_blob, void *arg) |
int | check_completed_operations_pre_commit (Thd_ndb *, NdbTransaction *, const NdbOperation *, uint *ignore_count) |
int | ndbcluster_commit (handlerton *hton, THD *thd, bool all) |
const char ** ha_ndbcluster::bas_ext () const [virtual]
If frm_error() is called, then we will use this to find out what file extensions exist for the storage engine. This is also used by the default rename_table and delete_table methods in handler.cc.
For engines that have two file name extensions (separate meta/index file and data file), the order of elements is relevant. The first element of the engine's file name extension array should be the meta/index file extension; the second element should be the data file extension. This order is assumed by prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.
Implements handler.
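As an illustration of the ordering rule, here is a minimal sketch of a bas_ext() implementation for a hypothetical two-file engine; the ha_example class name and the .xmi/.xmd extensions are invented for this example (NDB itself keeps table data in the cluster rather than in local files):

// Sketch only: extension list for a hypothetical engine with a meta/index
// file and a data file. Order matters: meta/index extension first, data
// extension second, as assumed by prepare_for_repair().
static const char *example_exts[] = {
  ".xmi",   // hypothetical meta/index file extension
  ".xmd",   // hypothetical data file extension
  NullS     // the list must be NullS-terminated
};

const char **ha_example::bas_ext() const
{
  return example_exts;
}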
int ha_ndbcluster::bulk_update_row (const uchar *old_data, uchar *new_data, uint *dup_key_found) [virtual]
This method is similar to update_row(); however, the handler does not need to execute the update at this point in time. The handler can be certain that either another call to bulk_update_row() or a call to exec_bulk_update() will occur before the set of updates in this query is concluded.
Note: If HA_ERR_FOUND_DUPP_KEY is returned, the handler must read all columns of the row so MySQL can create an error message. If the columns required for the error message are not read, the error message will contain garbage.
old_data | Old record |
new_data | New record |
dup_key_found | Number of duplicate keys found |
Reimplemented from handler.
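A hedged, caller-side sketch of the batching contract described above; the helper function and the way the row images are produced are illustrative, not the server's actual update path:

// Illustrative only: drive the batched-update protocol for a set of rows.
static int apply_batched_updates(ha_ndbcluster *h,
                                 uchar *old_rec, uchar *new_rec,
                                 uint n_rows)
{
  uint dup_key_found = 0;

  if (h->start_bulk_update())          // returns 1: batching not used
    return 0;                          // caller falls back to plain update_row()

  for (uint i = 0; i < n_rows; i++)
  {
    // The handler may defer the real work until exec_bulk_update().
    int err = h->bulk_update_row(old_rec, new_rec, &dup_key_found);
    if (err)
      return err;                      // e.g. HA_ERR_FOUND_DUPP_KEY
  }

  int err = h->exec_bulk_update(&dup_key_found);  // flush outstanding updates
  h->end_bulk_update();                           // clean-up, nothing pending
  return err;
}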
void ha_ndbcluster::cond_pop () [virtual]
Pop the top condition from the condition stack of the handler instance.
Pops the top of the condition stack, if the stack is not empty.
Reimplemented from handler.
const Item * ha_ndbcluster::cond_push (const Item *cond) [virtual]
Push condition down to the table handler.
cond | Condition to be pushed. The condition tree must not be modified by the caller. |
handler->ha_reset() call empties the condition stack. Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the condition stack.
Reimplemented from handler.
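For illustration, a hedged sketch of the push/pop contract from the caller's side; the helper function is hypothetical, and the real call sites live in the server's engine-condition-pushdown code:

// Illustrative only: offer a condition to the handler and note what remains.
static void try_push_condition(ha_ndbcluster *h, const Item *cond)
{
  // cond_push() returns the part of 'cond' the handler could NOT take over;
  // NULL means the whole condition was pushed down.
  const Item *remainder = h->cond_push(cond);

  if (remainder == cond)
  {
    // Nothing was pushed; the server evaluates the full condition itself.
  }
  else if (remainder != NULL)
  {
    // Partially pushed; the server must still evaluate 'remainder'.
  }

  // Shown only to illustrate the pairing: remove the condition we just
  // pushed. ha_reset() would instead empty the whole stack.
  h->cond_pop();
}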
int ha_ndbcluster::delete_table (const char *name) [virtual]
Delete a table in the engine. Called for base as well as temporary tables.
Delete all files with extension from bas_ext().
name | Base name of table |
0 | If we successfully deleted at least one file from bas_ext() and didn't get any other errors than ENOENT |
!0 | Error |
Reimplemented from handler.
void ha_ndbcluster::end_bulk_update () [virtual]
Perform any needed clean-up; no outstanding updates remain at this point.
Reimplemented from handler.
ha_rows ha_ndbcluster::end_read_removal (void) [virtual]
End read (before write) removal and return the number of rows really written
Reimplemented from handler.
ha_rows ha_ndbcluster::estimate_rows_upper_bound () [inline, virtual]
Return an upper bound on the current number of records in the table (the maximum number of records a full table scan will retrieve). If the upper bound is not known, HA_POS_ERROR should be returned as the maximum possible upper bound.
Reimplemented from handler.
int ha_ndbcluster::exec_bulk_update (uint *dup_key_found) [virtual]
After this call, all outstanding updates must be performed. The number of duplicate-key errors is reported in the dup_key_found parameter. Batched updates may continue after this call; the handler has to wait until end_bulk_update() before changing state.
dup_key_found | Number of duplicate keys found |
0 | Success |
>0 | Error code |
Reimplemented from handler.
bool ha_ndbcluster::get_error_message (int error, String *buf) [virtual]
bool ha_ndbcluster::get_no_parts (const char *name, uint *no_parts) [virtual]
Get number of partitions for table in SE
name | Normalized path to the table (same as passed to open) | |
[out] | no_parts | Number of partitions |
false | for success |
true | for failure, for example table didn't exist in engine |
Reimplemented from handler.
int ha_ndbcluster::index_first (uchar *buf) [virtual]
int ha_ndbcluster::index_last (uchar *buf) [virtual]
int ha_ndbcluster::index_next (uchar *buf) [virtual]
int ha_ndbcluster::index_prev (uchar *buf) [virtual]
int ha_ndbcluster::index_read_idx_map (uchar *buf, uint index, const uchar *key, key_part_map keypart_map, enum ha_rkey_function find_flag) [virtual]
Positions an index cursor to the index specified in argument. Fetches the row if available. If the key value is null, begin at the first key of the index.
Reimplemented from handler.
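A hedged sketch of a typical call, assuming the packed key image for the first two key parts of index 'idx' has already been prepared by the caller; the helper name and buffer handling are illustrative:

// Illustrative only: position on the first row matching the first two key
// parts of index 'idx'. 'key_image' must hold the packed key value.
static int lookup_by_prefix(ha_ndbcluster *h, TABLE *table, uint idx,
                            const uchar *key_image)
{
  // Bits 0 and 1 set: the search key covers the first two key parts only.
  key_part_map map = (key_part_map(1) << 2) - 1;

  int err = h->index_read_idx_map(table->record[0], idx, key_image, map,
                                  HA_READ_KEY_EXACT);
  if (err == HA_ERR_KEY_NOT_FOUND)
  {
    // No matching row in the index.
  }
  return err;
}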
virtual bool ha_ndbcluster::is_fatal_error (int error, uint flags) [inline, virtual]
This method is used to analyse the error to see whether it is ignorable or not; certain handlers have more ignorable errors than others. For example, the partition handler can get inserts into a range for which there is no partition, and this is an ignorable error. HA_ERR_FOUND_DUPP_UNIQUE is a special case in MyISAM that means the same thing as HA_ERR_FOUND_DUPP_KEY but can in some cases lead to a slightly different error message.
Reimplemented from handler.
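For reference, a hedged sketch of roughly the shape this decision takes in the base handler's default implementation; the ha_example class name stands in for any engine:

// Sketch: an error is non-fatal (ignorable) when it is 0, or when it is a
// duplicate-key error and the caller's flags allow duplicates to be ignored.
bool ha_example::is_fatal_error(int error, uint flags)
{
  if (!error ||
      ((flags & HA_CHECK_DUP_KEY) &&
       (error == HA_ERR_FOUND_DUPP_KEY ||
        error == HA_ERR_FOUND_DUPP_UNIQUE)))
    return false;   // ignorable
  return true;      // fatal
}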
uint ha_ndbcluster::number_of_pushed_joins () const [virtual]
Reports the number of tables included in the pushed join that this handler instance is part of; 0 means the join has not been pushed.
Reimplemented from handler.
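A hedged sketch of how calling code might combine this with root_of_pushed_join(); the helper name is illustrative:

// Illustrative only: does this handler participate in a pushed join?
static bool part_of_pushed_join(const ha_ndbcluster *h)
{
  if (h->number_of_pushed_joins() == 0)
    return false;                       // not pushed

  const TABLE *root = h->root_of_pushed_join();
  // Rows for this table are produced as part of the join rooted at 'root'.
  return root != NULL;
}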
const TABLE * ha_ndbcluster::parent_of_pushed_join () const [virtual]
void ha_ndbcluster::print_error (int error, myf errflag) [virtual]
Print error that we got from handler function.
Reimplemented from handler.
int ha_ndbcluster::read_first_row (uchar *buf, uint primary_key) [virtual]
Read first row (only) from a table.
This is never called for InnoDB tables, as these table types have the HA_STATS_RECORDS_IS_EXACT flag set.
Reimplemented from handler.
int ha_ndbcluster::read_multi_range_first (KEY_MULTI_RANGE **found_range_p, KEY_MULTI_RANGE *ranges, uint range_count, bool sorted, HANDLER_BUFFER *buffer)
Multi-range read interface.
int ha_ndbcluster::read_range_first (const key_range *start_key, const key_range *end_key, bool eq_range_arg, bool sorted) [virtual]
Read first row between two ranges. Store ranges for future calls to read_range_next.
start_key | Start key. Is 0 if no min range |
end_key | End key. Is 0 if no max range |
eq_range_arg | Set to 1 if start_key == end_key |
sorted | Set to 1 if result should be sorted per key |
0 | Found row |
HA_ERR_END_OF_FILE | No rows in range |
Other | Error code |
Reimplemented from handler.
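A hedged sketch of the resulting scan loop; it assumes the index cursor was already set up with index_init(), and error handling is reduced to the documented outcomes:

// Illustrative only: iterate over all rows in [start_key, end_key].
static int scan_range(ha_ndbcluster *h,
                      const key_range *start_key, const key_range *end_key)
{
  int err = h->read_range_first(start_key, end_key,
                                /* eq_range */ false, /* sorted */ true);
  while (err == 0)
  {
    // The current row is now in the table's record buffer; process it here.
    err = h->read_range_next();
  }
  return (err == HA_ERR_END_OF_FILE) ? 0 : err;  // end of range is not an error
}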
int ha_ndbcluster::read_range_next () [virtual]
Read next row between two endpoints.
0 | Found row |
HA_ERR_END_OF_FILE | No rows in range |
Other | Error code |
Reimplemented from handler.
ha_rows ha_ndbcluster::records () [virtual]
Number of rows in table. It will only be called if (table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT)) != 0
Reimplemented from handler.
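The documented contract, as a hedged caller-side sketch (the helper name is illustrative):

// Illustrative only: records() may be consulted only when the engine
// advertises HA_HAS_RECORDS or HA_STATS_RECORDS_IS_EXACT.
static ha_rows exact_row_count(ha_ndbcluster *h)
{
  if (h->table_flags() & (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT))
    return h->records();    // engine can deliver the row count directly
  return HA_POS_ERROR;      // otherwise rely on statistics filled in by info()
}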
my_bool ha_ndbcluster::register_query_cache_table (THD *thd, char *table_key, uint key_length, qc_engine_callback *engine_callback, ulonglong *engine_data) [virtual]
Register a named table with a call back function to the query cache.
thd | The thread handle | |
table_key | A pointer to the table name in the table cache | |
key_length | The length of the table name | |
[out] | engine_callback | The pointer to the storage engine call back function |
[out] | engine_data | Storage engine specific data which could be anything |
This method offers the storage engine the possibility to store a reference to a table name which is going to be used with the query cache. The method is called each time a statement is written to the cache and can be used to verify whether a specific statement is cacheable. It also offers the possibility to register a generic (but static) callback function which is called each time a statement is matched against the query cache.
TRUE | Success |
FALSE | The specified table or current statement should not be cached |
Reimplemented from handler.
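A hedged sketch of an engine-side implementation; ha_example and the always-valid callback are illustrative, and the callback is assumed to follow the server's qc_engine_callback shape (THD*, char*, uint, ulonglong*). A real engine would validate *engine_data against its own table version:

// Illustrative callback: invoked on each query cache hit to decide whether
// the cached result for 'table_key' may still be served.
static my_bool example_cache_check(THD *thd, char *table_key, uint key_length,
                                   ulonglong *engine_data)
{
  // A real engine would compare *engine_data with the current table version.
  return TRUE;   // cached entry is still valid
}

my_bool ha_example::register_query_cache_table(THD *thd, char *table_key,
                                               uint key_length,
                                               qc_engine_callback *engine_callback,
                                               ulonglong *engine_data)
{
  *engine_callback = example_cache_check;  // called on every cache hit
  *engine_data = 0;                        // engine-private version token
  return TRUE;                             // this statement may be cached
}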
int ha_ndbcluster::rename_table (const char *from, const char *to) [virtual]
Default rename_table() and delete_table() rename/delete files with a given name and extensions from bas_ext().
These methods can be overridden, but their default implementation provide useful functionality.
Reimplemented from handler.
int ha_ndbcluster::reset (void) [virtual]
Reset state of file to after 'open'. This function is called after every statement for all tables used by that statement.
Reimplemented from handler.
int ha_ndbcluster::rnd_init (bool scan) [virtual]
rnd_init() can be called two times without rnd_end() in between (this only makes sense if scan == 1). In that case, the second call should prepare for the new table scan: for example, if rnd_init() allocates the cursor, the second call should position it at the start of the table; there is no need to deallocate and allocate it again.
Implements handler.
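A hedged sketch of the basic full-table-scan loop this contract supports (the helper name is illustrative):

// Illustrative only: a plain full table scan. Calling rnd_init(true) a second
// time without rnd_end() would simply reposition the cursor at the start.
static int scan_all_rows(ha_ndbcluster *h, TABLE *table)
{
  int err = h->rnd_init(true /* scan */);
  if (err)
    return err;

  while ((err = h->rnd_next(table->record[0])) == 0)
  {
    // Process the row now in table->record[0].
  }

  h->rnd_end();
  return (err == HA_ERR_END_OF_FILE) ? 0 : err;  // end of table is not an error
}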
int ha_ndbcluster::rnd_next (uchar *buf) [virtual]
int ha_ndbcluster::rnd_pos (uchar *buf, uchar *pos) [virtual]
const TABLE * ha_ndbcluster::root_of_pushed_join () const [virtual]
bool ha_ndbcluster::start_bulk_update () [virtual]
0 | Bulk update used by handler |
1 | Bulk update not used, normal operation used |
Reimplemented from handler.
bool ha_ndbcluster::start_read_removal (void) [virtual]
Start read (before write) removal on the current table.
Reimplemented from handler.
THR_LOCK_DATA ** ha_ndbcluster::store_lock (THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) [virtual]
Is not invoked for non-transactional temporary tables.
Implements handler.
uint8 ha_ndbcluster::table_cache_type () [virtual]
Type of table for query caching.
Reimplemented from handler.
const char * ha_ndbcluster::table_type () const [virtual]
The following can be called without an open handler
Implements handler.