Commit f6549e95 authored by Vicențiu Ciorbaru's avatar Vicențiu Ciorbaru

MDEV-18323 Convert MySQL JSON type to MariaDB TEXT in mysql_upgrade

This patch solves two key problems.
1. There is a type number clash between MySQL and MariaDB. The number
   245, used for MariaDB Virtual Fields, is the same as MySQL's JSON
   type. If unhandled, this leads to corrupt-FRM errors. The code now
   checks the FRM table version number: if it matches MySQL 5.7+ (but
   is below MariaDB 10.0), it assumes it is dealing with a MySQL table
   that has a JSON column.
2. MySQL's JSON datatype uses a proprietary format to pack JSON data. The
   patch introduces a datatype plugin which parses the format and converts
   it to its string representation.

The intended conversion path is to use the JSON datatype only within
ALTER TABLE <table> FORCE, to force a table recreate. This happens
during mysql_upgrade or via a direct ALTER TABLE <table> FORCE.
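
A minimal sketch of that path (the table name t1 is hypothetical; the
plugin must be loaded first, as the tests below do):

    INSTALL SONAME 'type_mysql_json';
    ALTER TABLE t1 FORCE;  -- the MySQL 5.7 JSON column becomes LONGTEXT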
parent 85c686e2
--plugin-load=$TYPE_MYSQL_JSON_SO
#
# The following test takes 2 tables containing a JSON column and attempts
# to repair them.
#
# The tables' header is (Description, Expected, Actual), where Description
# gives a brief description of what the JSON value is testing in the MariaDB
# implementation. Expected is the LONGTEXT string and Actual is the JSON
# column that needs to be converted to MariaDB's representation of
# LONGTEXT.
#
call mtr.add_suppression("Table rebuild required");
call mtr.add_suppression("is marked as crashed");
call mtr.add_suppression("Checking");
SET NAMES utf8;
#
# Check that only ALTER TABLE ... FORCE is allowed on a MySQL 5.7 table
# with a JSON column.
#
show create table tempty;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.tempty` FORCE" or dump/reload to fix it!
select * from tempty;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.tempty` FORCE" or dump/reload to fix it!
alter table tempty force;
show create table tempty;
Table Create Table
tempty CREATE TABLE `tempty` (
`t` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
show create table mysql_json_test;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test` FORCE" or dump/reload to fix it!
select * from mysql_json_test;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test` FORCE" or dump/reload to fix it!
LOCK TABLES mysql_json_test WRITE;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test` FORCE" or dump/reload to fix it!
alter table mysql_json_test force;
select description, expected, actual, expected = actual from mysql_json_test;
description expected actual expected = actual
Array LITERALS: ["prefix", false, "suffix", 1] ["prefix", false, "suffix", 1] 1
Array LITERALS: ["prefix", null, "suffix", 1] ["prefix", null, "suffix", 1] 1
Array LITERALS: ["prefix", true, "suffix", 1] ["prefix", true, "suffix", 1] 1
DateTime as Raw Value: "2015-01-15 23:24:25.000000" "2015-01-15 23:24:25.000000" 1
DateTime as Raw Value: "2015-01-15 23:24:25.000000" "2015-01-15 23:24:25.000000" 1
DateTime as Raw Value: "2015-01-15" "2015-01-15" 1
DateTime as Raw Value: "23:24:25.000000" "23:24:25.000000" 1
Empty JSON Object/Array: [] [] 1
Empty JSON Object/Array: {} {} 1
GeoJSON {"type": "GeometryCollection", "geometries": []} {"type": "GeometryCollection", "geometries": []} 1
GeoJSON {"type": "LineString", "coordinates": [[0, 5], [5, 10], [10, 15]]} {"type": "LineString", "coordinates": [[0, 5], [5, 10], [10, 15]]} 1
GeoJSON {"type": "MultiPoint", "coordinates": [[1, 1], [2, 2], [3, 3]]} {"type": "MultiPoint", "coordinates": [[1, 1], [2, 2], [3, 3]]} 1
GeoJSON {"type": "Point", "coordinates": [11.1111, 12.22222]} {"type": "Point", "coordinates": [11.1111, 12.22222]} 1
JSON LITERALS: {"val": false} {"val": false} 1
JSON LITERALS: {"val": null} {"val": null} 1
JSON LITERALS: {"val": true} {"val": true} 1
Opaque Types: opaque_mysql_type_binary "base64:type254:YWJjAAAAAAAAAA==" "base64:type254:YWJjAAAAAAAAAA==" 1
Opaque Types: opaque_mysql_type_bit "base64:type16:yv4=" "base64:type16:yv4=" 1
Opaque Types: opaque_mysql_type_blob "base64:type252:yv66vg==" "base64:type252:yv66vg==" 1
Opaque Types: opaque_mysql_type_date "2015-01-15" "2015-01-15" 1
Opaque Types: opaque_mysql_type_datetime "2015-01-15 23:24:25.000000" "2015-01-15 23:24:25.000000" 1
Opaque Types: opaque_mysql_type_enum "b" "b" 1
Opaque Types: opaque_mysql_type_geom {"type": "Point", "coordinates": [1, 1]} {"type": "Point", "coordinates": [1, 1]} 1
Opaque Types: opaque_mysql_type_longblob "base64:type251:yv66vg==" "base64:type251:yv66vg==" 1
Opaque Types: opaque_mysql_type_mediumblob "base64:type250:yv66vg==" "base64:type250:yv66vg==" 1
Opaque Types: opaque_mysql_type_set "b,c" "b,c" 1
Opaque Types: opaque_mysql_type_time "23:24:25.000000" "23:24:25.000000" 1
Opaque Types: opaque_mysql_type_tinyblob "base64:type249:yv66vg==" "base64:type249:yv66vg==" 1
Opaque Types: opaque_mysql_type_varbinary "base64:type15:YWJj" "base64:type15:YWJj" 1
Opaque Types: opaque_mysql_type_varchar "base64:type15:Zm9v" "base64:type15:Zm9v" 1
Opaque Types: opaque_mysql_type_year "base64:type13:MjAxOQ==" "base64:type13:MjAxOQ==" 1
Raw LITERALS: false false 1
Raw LITERALS: null null 1
Raw LITERALS: true true 1
Raw doubles as JSON -2.2250738585072014e-308 -2.2250738585072014e-308 1
Raw doubles as JSON -5678.987 -5678.987 1
Raw doubles as JSON 0.0 0.0 1
Raw doubles as JSON 2.2250738585072014e-308 2.2250738585072014e-308 1
Raw doubles as JSON 3.14 3.14 1
Raw integers as JSON -127 -127 1
Raw integers as JSON -2147483648 -2147483648 1
Raw integers as JSON -32768 -32768 1
Raw integers as JSON -9223372036854775807 -9223372036854775807 1
Raw integers as JSON 0 0 1
Raw integers as JSON 128 128 1
Raw integers as JSON 18446744073709551615 18446744073709551615 1
Raw integers as JSON 2147483647 2147483647 1
Raw integers as JSON 32767 32767 1
Raw integers as JSON 4294967295 4294967295 1
Raw integers as JSON 65535 65535 1
Raw integers as JSON 65536 65536 1
Raw integers as JSON 9223372036854775807 9223372036854775807 1
Simple Array as Base Key [1, 2, 3, 4, 5, [], "a", "b", "c"] [1, 2, 3, 4, 5, [], "a", "b", "c"] 1
Simple Array as Value {"a": [1, 2], "b": ["x", "y"]} {"a": [1, 2], "b": ["x", "y"]} 1
Simple JSON test {"key1": "val1", "key2": "val2"} {"key1": "val1", "key2": "val2"} 1
Special Characters: "" "" 1
Special Characters: "'" "'" 1
Special Characters: "'" "'" 1
Special Characters: "'" "'" 1
Special Characters: "''" "''" 1
Special Characters: "\"" "\"" 1
Special Characters: "\\" "\\" 1
Special Characters: "\\b" "\\b" 1
Special Characters: "\b" "\b" 1
Special Characters: "\f" "\f" 1
Special Characters: "\n" "\n" 1
Special Characters: "\r" "\r" 1
Special Characters: "\t" "\t" 1
Special Characters: "f" "f" 1
Special Characters: "key1 - with \" val " "key1 - with \" val " 1
Special Characters: "q" "q" 1
Special Characters: "some_string" "some_string" 1
Special Characters: ["a ' b", "c ' d"] ["a ' b", "c ' d"] 1
Special Characters: ["a \" b", "c \" d"] ["a \" b", "c \" d"] 1
Special Characters: ["a \\ b", "c \\ d"] ["a \\ b", "c \\ d"] 1
Special Characters: ["a \b b", "c \b d"] ["a \b b", "c \b d"] 1
Special Characters: ["a \f b", "c \f d"] ["a \f b", "c \f d"] 1
Special Characters: ["a \r b", "c \r d"] ["a \r b", "c \r d"] 1
Special Characters: ["a \t b", "c \t d"] ["a \t b", "c \t d"] 1
Special Characters: {"[": "]"} {"[": "]"} 1
Special Characters: {"key ' key": "val ' val"} {"key ' key": "val ' val"} 1
Special Characters: {"key \" key": "val \" val"} {"key \" key": "val \" val"} 1
Special Characters: {"key \\ key": "val \\ val"} {"key \\ key": "val \\ val"} 1
Special Characters: {"key \\0 key": "val \n val"} {"key \\0 key": "val \n val"} 1
Special Characters: {"key \\Z key": "val ' val"} {"key \\Z key": "val ' val"} 1
Special Characters: {"key \b key": "val \b val"} {"key \b key": "val \b val"} 1
Special Characters: {"key \f key": "val \f val"} {"key \f key": "val \f val"} 1
Special Characters: {"key \n key": "val \n val"} {"key \n key": "val \n val"} 1
Special Characters: {"key \r key": "val \r val"} {"key \r key": "val \r val"} 1
Special Characters: {"key \t key": "val \t val"} {"key \t key": "val \t val"} 1
Special Characters: {"key1 and \n\"key2\"": "val1\t val2"} {"key1 and \n\"key2\"": "val1\t val2"} 1
Special Characters: {"{": "}"} {"{": "}"} 1
Special Characters: {"{": "}"} {"{": "}"} 1
Special String Cases: [""] [""] 1
Special String Cases: {"": ""} {"": ""} 1
Timestamp as RawValue "2019-12-26 19:56:03.000000" "2019-12-26 19:56:03.000000" 1
UTF8 Characters: "Anel Husaković - test: đžšćč" "Anel Husaković - test: đžšćč" 1
UTF8 Characters: {"Name": "Anel Husaković - test: đžšćč"} {"Name": "Anel Husaković - test: đžšćč"} 1
UTF8 Characters: {"Person": "EMP", "details": {"Name": "Anel Husaković - test: đžšćč"}} {"Person": "EMP", "details": {"Name": "Anel Husaković - test: đžšćč"}} 1
UTF8 Characters: {"details": {"Name": "Anel Husaković - test: đžšćč"}, "\"Anel Husaković - test: đžšćč\"": "EMP"} {"details": {"Name": "Anel Husaković - test: đžšćč"}, "\"Anel Husaković - test: đžšćč\"": "EMP"} 1
#
# A quick check that all rows match the original MySQL table.
#
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests'
from mysql_json_test;
Total_Number_of_Tests Successful_Tests
100 100
show create table mysql_json_test;
Table Create Table
mysql_json_test CREATE TABLE `mysql_json_test` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
show create table mysql_json_test_big;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test_big` FORCE" or dump/reload to fix it!
select * from mysql_json_test_big;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test_big` FORCE" or dump/reload to fix it!
#
# This test checks the large storage format implementation of MySQL's JSON.
# The actual contents are not printed, as they are not human-readable;
# we just compare the strings and make sure they match.
#
alter table mysql_json_test_big force;
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests',
sum(JSON_VALID(actual)) as 'String_is_valid_JSON'
from mysql_json_test_big;
Total_Number_of_Tests Successful_Tests String_is_valid_JSON
1 1 1
drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
--source include/have_utf8.inc
--echo #
--echo # The following test takes 2 tables containing a JSON column and attempts
--echo # to repair them.
--echo #
--echo # The tables' header is (Description, Expected, Actual), where Description
--echo # gives a brief description of what the JSON value is testing in the MariaDB
--echo # implementation. Expected is the LONGTEXT string and Actual is the JSON
--echo # column that needs to be converted to MariaDB's representation of
--echo # LONGTEXT.
--echo #
call mtr.add_suppression("Table rebuild required");
call mtr.add_suppression("is marked as crashed");
call mtr.add_suppression("Checking");
let $MYSQLD_DATADIR= `select @@datadir`;
SET NAMES utf8;
--copy_file std_data/mysql_json/tempty.frm $MYSQLD_DATADIR/test/tempty.frm
--copy_file std_data/mysql_json/tempty.MYI $MYSQLD_DATADIR/test/tempty.MYI
--copy_file std_data/mysql_json/tempty.MYD $MYSQLD_DATADIR/test/tempty.MYD
--copy_file std_data/mysql_json/mysql_json_test.frm $MYSQLD_DATADIR/test/mysql_json_test.frm
--copy_file std_data/mysql_json/mysql_json_test.MYI $MYSQLD_DATADIR/test/mysql_json_test.MYI
--copy_file std_data/mysql_json/mysql_json_test.MYD $MYSQLD_DATADIR/test/mysql_json_test.MYD
--copy_file std_data/mysql_json/mysql_json_test_big.frm $MYSQLD_DATADIR/test/mysql_json_test_big.frm
--copy_file std_data/mysql_json/mysql_json_test_big.MYI $MYSQLD_DATADIR/test/mysql_json_test_big.MYI
--copy_file std_data/mysql_json/mysql_json_test_big.MYD $MYSQLD_DATADIR/test/mysql_json_test_big.MYD
--echo #
--echo # Check that only ALTER TABLE ... FORCE is allowed on a MySQL 5.7 table
--echo # with a JSON column.
--echo #
--error ER_TABLE_NEEDS_REBUILD
show create table tempty;
--error ER_TABLE_NEEDS_REBUILD
select * from tempty;
alter table tempty force;
show create table tempty;
--error ER_TABLE_NEEDS_REBUILD
show create table mysql_json_test;
--error ER_TABLE_NEEDS_REBUILD
select * from mysql_json_test;
--error ER_TABLE_NEEDS_REBUILD
LOCK TABLES mysql_json_test WRITE;
alter table mysql_json_test force;
--sorted_result
select description, expected, actual, expected = actual from mysql_json_test;
--echo #
--echo # A quick check that all rows match the original MySQL table.
--echo #
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests'
from mysql_json_test;
show create table mysql_json_test;
--error ER_TABLE_NEEDS_REBUILD
show create table mysql_json_test_big;
--error ER_TABLE_NEEDS_REBUILD
select * from mysql_json_test_big;
--echo #
--echo # This test checks the large storage format implementation of MySQL's JSON.
--echo # The actual contents are not printed, as they are not human-readable;
--echo # we just compare the strings and make sure they match.
--echo #
alter table mysql_json_test_big force;
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests',
sum(JSON_VALID(actual)) as 'String_is_valid_JSON'
from mysql_json_test_big;
drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
call mtr.add_suppression("Table rebuild required");
call mtr.add_suppression("is marked as crashed");
call mtr.add_suppression("Checking");
SET NAMES utf8;
set sql_mode="";
install soname 'type_mysql_json.so';
show create table tempty;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.tempty` FORCE" or dump/reload to fix it!
show create table mysql_json_test;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test` FORCE" or dump/reload to fix it!
show create table mysql_json_test_big;
ERROR HY000: Table rebuild required. Please do "ALTER TABLE `test.mysql_json_test_big` FORCE" or dump/reload to fix it!
# Run mysql_upgrade to fix the tables containing JSON.
Phase 1/7: Checking and upgrading mysql database
Processing databases
mysql
mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.roles_mapping OK
mysql.servers OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
Processing databases
information_schema
mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
performance_schema
test
test.mysql_json_test Needs upgrade
test.mysql_json_test_big Needs upgrade
test.tempty Needs upgrade
Repairing tables
test.mysql_json_test OK
test.mysql_json_test_big OK
test.tempty OK
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
#
# Now check if the table structure is correct and that the data
# is still present.
#
show create table tempty;
Table Create Table
tempty CREATE TABLE `tempty` (
`t` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
show create table mysql_json_test;
Table Create Table
mysql_json_test CREATE TABLE `mysql_json_test` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
show create table mysql_json_test_big;
Table Create Table
mysql_json_test_big CREATE TABLE `mysql_json_test_big` (
`description` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`expected` longtext COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`actual` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests',
sum(JSON_VALID(actual)) as 'String_is_valid_JSON'
from mysql_json_test;
Total_Number_of_Tests Successful_Tests String_is_valid_JSON
100 100 100
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests',
sum(JSON_VALID(actual)) as 'String_is_valid_JSON'
from mysql_json_test_big;
Total_Number_of_Tests Successful_Tests String_is_valid_JSON
1 1 1
drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
uninstall soname 'type_mysql_json.so';
--source include/mysql_upgrade_preparation.inc
--source include/have_working_dns.inc
--source include/have_innodb.inc
call mtr.add_suppression("Table rebuild required");
call mtr.add_suppression("is marked as crashed");
call mtr.add_suppression("Checking");
let $MYSQLD_DATADIR= `select @@datadir`;
SET NAMES utf8;
--copy_file std_data/mysql_json/tempty.frm $MYSQLD_DATADIR/test/tempty.frm
--copy_file std_data/mysql_json/tempty.MYI $MYSQLD_DATADIR/test/tempty.MYI
--copy_file std_data/mysql_json/tempty.MYD $MYSQLD_DATADIR/test/tempty.MYD
--copy_file std_data/mysql_json/mysql_json_test.frm $MYSQLD_DATADIR/test/mysql_json_test.frm
--copy_file std_data/mysql_json/mysql_json_test.MYI $MYSQLD_DATADIR/test/mysql_json_test.MYI
--copy_file std_data/mysql_json/mysql_json_test.MYD $MYSQLD_DATADIR/test/mysql_json_test.MYD
--copy_file std_data/mysql_json/mysql_json_test_big.frm $MYSQLD_DATADIR/test/mysql_json_test_big.frm
--copy_file std_data/mysql_json/mysql_json_test_big.MYI $MYSQLD_DATADIR/test/mysql_json_test_big.MYI
--copy_file std_data/mysql_json/mysql_json_test_big.MYD $MYSQLD_DATADIR/test/mysql_json_test_big.MYD
set sql_mode="";
--eval install soname '$TYPE_MYSQL_JSON_SO'
--error ER_TABLE_NEEDS_REBUILD
show create table tempty;
--error ER_TABLE_NEEDS_REBUILD
show create table mysql_json_test;
--error ER_TABLE_NEEDS_REBUILD
show create table mysql_json_test_big;
--echo # Run mysql_upgrade to fix the tables containing JSON.
--exec $MYSQL_UPGRADE --force 2>&1
--echo #
--echo # Now check if the table structure is correct and that the data
--echo # is still present.
--echo #
show create table tempty;
show create table mysql_json_test;
show create table mysql_json_test_big;
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests',
sum(JSON_VALID(actual)) as 'String_is_valid_JSON'
from mysql_json_test;
select count(*) as 'Total_Number_of_Tests',
sum(expected = actual) as 'Successful_Tests',
sum(JSON_VALID(actual)) as 'String_is_valid_JSON'
from mysql_json_test_big;
drop table tempty;
drop table mysql_json_test;
drop table mysql_json_test_big;
--eval uninstall soname '$TYPE_MYSQL_JSON_SO'
--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
This diff was suppressed by a .gitattributes entry.
# Copyright (c) 2020, MariaDB Foundation.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
MYSQL_ADD_PLUGIN(type_mysql_json
mysql_json.cc type.cc
MODULE_ONLY RECOMPILE_FOR_EMBEDDED)
/*
Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2020 MariaDB Foundation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
#include "mysql_json.h"
#include "my_global.h"
#include "compat56.h"
#include "my_decimal.h"
#include "sql_time.h"
static void TIME_from_longlong_date_packed(MYSQL_TIME *ltime, longlong tmp)
{
TIME_from_longlong_datetime_packed(ltime, tmp);
ltime->time_type= MYSQL_TIMESTAMP_DATE;
}
/*
JSON values in MySQL comprise the standard set of JSON values plus a
MySQL-specific set. A JSON number type is subdivided into int, uint, double and
decimal.
MySQL also adds four built-in date/time values: date, time, datetime and
timestamp. An additional opaque value can store any other MySQL type.
*/
enum JSONB_LITERAL_TYPES {
JSONB_NULL_LITERAL= 0x0,
JSONB_TRUE_LITERAL= 0x1,
JSONB_FALSE_LITERAL= 0x2,
};
/*
The size of offset or size fields in the small and the large storage
format for JSON objects and JSON arrays.
*/
static const uchar SMALL_OFFSET_SIZE= 2;
static const uchar LARGE_OFFSET_SIZE= 4;
/*
The size of key entries for objects when using the small storage
format or the large storage format. In the small format it is 4
bytes (2 bytes for key offset and 2 bytes for key length). In the
large format it is 6 (4 bytes for offset, 2 bytes for length).
*/
static const uchar KEY_ENTRY_SIZE_SMALL= (2 + SMALL_OFFSET_SIZE);
static const uchar KEY_ENTRY_SIZE_LARGE= (2 + LARGE_OFFSET_SIZE);
/*
The size of value entries for objects or arrays. When using the
small storage format, the entry size is 3 (1 byte for type, 2 bytes
for offset). When using the large storage format, it is 5 (1 byte
for type, 4 bytes for offset).
*/
static const uchar VALUE_ENTRY_SIZE_SMALL= (1 + SMALL_OFFSET_SIZE);
static const uchar VALUE_ENTRY_SIZE_LARGE= (1 + LARGE_OFFSET_SIZE);
/* The maximum number of nesting levels allowed in a JSON document. */
static const uchar JSON_DOCUMENT_MAX_DEPTH= 150;
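/*
  An illustrative, hand-worked example (not part of the original sources):
  the document {"a": 1} in the small storage format, following the sizes
  above. After the leading type byte 0x00 (JSONB_TYPE_SMALL_OBJECT), the
  object data is:
    01 00        element count = 1
    0C 00        total size = 12 bytes, including this header
    0B 00 01 00  key entry: key offset = 11, key length = 1
    05 01 00     value entry: JSONB_TYPE_INT16, value 1 stored inline
    61           key data: "a"
*/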
/**
Read an offset or size field from a buffer. The offset could be either
a two byte unsigned integer or a four byte unsigned integer.
@param data the buffer to read from
@param large tells if the large or small storage format is used; true
means read four bytes, false means read two bytes
*/
static inline size_t read_offset_or_size(const uchar *data, bool large)
{
return large ? uint4korr(data) : uint2korr(data);
}
static inline size_t key_size(bool large)
{
return large ? KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL;
}
static inline size_t value_size(bool large)
{
return large ? VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL;
}
/**
Inlined values are a space optimization. The actual value is stored
instead of the offset pointer to the location where a non-inlined
value would be located.
@param[in] type The type to check.
@param[in] large tells if the large or small storage format is used;
*/
static inline bool type_is_stored_inline(JSONB_TYPES type, bool large)
{
return (type == JSONB_TYPE_INT16 ||
type == JSONB_TYPE_UINT16 ||
type == JSONB_TYPE_LITERAL ||
(large && (type == JSONB_TYPE_INT32 ||
type == JSONB_TYPE_UINT32)));
}
/**
Read a variable length integer. A variable length integer uses the 8th bit in
each byte to mark if there are more bytes needed to store the integer. The
other 7 bits in the byte are used to store the actual integer's bits.
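For example (an illustrative case, not from the original comment), the bytes
0x81 0x02 decode to (0x81 & 0x7f) | (0x02 << 7) = 257, consuming two bytes.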
@param[in] data the buffer to read from
@param[in] data_length the maximum number of bytes to read from data
@param[out] length the length that was read
@param[out] num the number of bytes needed to represent the length
@return false on success, true on error
*/
static inline bool read_variable_length(const uchar *data, size_t data_length,
size_t *length, size_t *num)
{
/*
It takes five bytes to represent UINT_MAX32, which is the largest
supported length, so don't look any further.
Use data_length as max value to prevent segfault when reading a corrupted
JSON document.
*/
const size_t MAX_BYTES= MY_MIN(data_length, 5);
size_t len= 0;
for (size_t i= 0; i < MAX_BYTES; i++)
{
/* Get the next 7 bits of the length. */
len|= (data[i] & 0x7f) << (7 * i);
if ((data[i] & 0x80) == 0)
{
/* The length shouldn't exceed 32 bits. */
if (len > UINT_MAX32)
return true;
/* This was the last byte. Return successfully. */
*num= i + 1;
*length= len;
return false;
}
}
/* No more available bytes. Return true to signal error. This implies a
corrupted JSON document. */
return true;
}
/**
JSON formatting in MySQL escapes a few special characters to prevent
ambiguity.
*/
static bool append_string_json(String *buffer, const uchar *data, size_t len)
{
const uchar *last= data + len;
for (; data < last; data++)
{
const uchar c= *data;
switch (c) {
case '\\':
buffer->append("\\\\");
break;
case '\n':
buffer->append("\\n");
break;
case '\r':
buffer->append("\\r");
break;
case '"':
buffer->append("\\\"");
break;
case '\b':
buffer->append("\\b");
break;
case '\f':
buffer->append("\\f");
break;
case '\t':
buffer->append("\\t");
break;
default:
buffer->append(c);
break;
}
}
return false;
}
/*
Helper used for JSONB_TYPE_OPAQUE values that contain packed MySQL
temporal types (TIME, DATE, DATETIME, TIMESTAMP).
*/
static bool print_mysql_datetime_value(String *buffer, enum_field_types type,
const uchar *data, size_t len)
{
if (len < 8)
return true;
MYSQL_TIME t;
switch (type)
{
case MYSQL_TYPE_TIME:
TIME_from_longlong_time_packed(&t, sint8korr(data));
break;
case MYSQL_TYPE_DATE:
TIME_from_longlong_date_packed(&t, sint8korr(data));
break;
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_TIMESTAMP:
TIME_from_longlong_datetime_packed(&t, sint8korr(data));
break;
default:
DBUG_ASSERT(0);
return true;
}
/* Wrap all datetime strings within double quotes. */
buffer->append('\"');
buffer->reserve(MAX_DATE_STRING_REP_LENGTH);
buffer->length(buffer->length() +
my_TIME_to_str(&t, const_cast<char *>(buffer->end()), 6));
buffer->append('\"');
return false;
}
static bool parse_mysql_scalar(String *buffer, size_t value_json_type,
const uchar *data, size_t len)
{
switch (value_json_type) {
case JSONB_TYPE_LITERAL:
{
if (len < 1)
return true;
switch (static_cast<JSONB_LITERAL_TYPES>(*data)) {
case JSONB_NULL_LITERAL:
return buffer->append("null");
case JSONB_TRUE_LITERAL:
return buffer->append("true");
case JSONB_FALSE_LITERAL:
return buffer->append("false");
default: /* Invalid literal constant, malformed JSON. */
return true;
}
}
case JSONB_TYPE_INT16:
return len < 2 || buffer->append_longlong(sint2korr(data));
case JSONB_TYPE_INT32:
return len < 4 || buffer->append_longlong(sint4korr(data));
case JSONB_TYPE_INT64:
return len < 8 || buffer->append_longlong(sint8korr(data));
case JSONB_TYPE_UINT16:
return len < 2 || buffer->append_ulonglong(uint2korr(data));
case JSONB_TYPE_UINT32:
return len < 4 || buffer->append_ulonglong(uint4korr(data));
case JSONB_TYPE_UINT64:
return len < 8 || buffer->append_ulonglong(uint8korr(data));
case JSONB_TYPE_DOUBLE:
if (len < 8)
return true;
buffer->reserve(FLOATING_POINT_BUFFER, 2 * FLOATING_POINT_BUFFER);
buffer->qs_append(reinterpret_cast<const double *>(data));
return false;
case JSONB_TYPE_STRING:
{
size_t string_length, store_bytes;
return read_variable_length(data, len, &string_length, &store_bytes) ||
len < store_bytes + string_length ||
buffer->append('"') ||
append_string_json(buffer, data + store_bytes, string_length) ||
buffer->append('"');
}
case JSONB_TYPE_OPAQUE:
{
/* The field_type maps directly to enum_field_types. */
const uchar type_value= *data;
const enum_field_types field_type= static_cast<enum_field_types>(type_value);
size_t blob_length, length_bytes;
const uchar *blob_start;
if (read_variable_length(data + 1, len, &blob_length, &length_bytes) ||
len < length_bytes + blob_length)
return true;
blob_start= data + length_bytes + 1;
switch (field_type) {
case MYSQL_TYPE_TIME:
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_TIMESTAMP:
return print_mysql_datetime_value(buffer, field_type,
blob_start, blob_length);
case MYSQL_TYPE_NEWDECIMAL:
{
/* Expect at least two bytes, which contain precision and scale. */
if (blob_length < 2)
return true;
const int precision= blob_start[0];
const int scale= blob_start[1];
my_decimal d;
/* The decimal value is encoded after the two prec/scale bytes. */
const size_t dec_size= my_decimal_get_binary_size(precision, scale);
if (dec_size != blob_length - 2 ||
binary2my_decimal(E_DEC_ERROR,
reinterpret_cast<const uchar *>(blob_start + 2),
&d, precision, scale) != E_DEC_OK)
return true;
if (d.to_string_native(buffer, 0, 0, ' ', E_DEC_ERROR) != E_DEC_OK)
return true;
return false;
}
default:
{
/* Any other MySQL type is presented as a base64 encoded string. */
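/* For instance (cf. the test results above), a VARCHAR value "foo"
   (field_type 15) is printed as "base64:type15:Zm9v". */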
if (buffer->append("\"base64:type") ||
buffer->append_longlong(field_type) ||
buffer->append(':'))
return true;
const size_t needed= my_base64_needed_encoded_length(
static_cast<int>(blob_length));
if (buffer->reserve(needed) ||
my_base64_encode(blob_start, blob_length,
const_cast<char*>(buffer->end())))
return true;
/* needed - 1 excludes the null terminator that my_base64_encode appends */
DBUG_ASSERT(*(buffer->end() + needed - 1) == '\0');
buffer->length(buffer->length() + needed - 1);
return buffer->append('"');
}
}
}
default:
return true;
}
}
/**
Read a value from a JSON Object or Array, given the position of it.
This function handles both inlined values as well as values stored at
an offset.
@param[out] buffer Where to print the results.
@param[in] data The raw binary data of the Object or Array.
@param[in] len The length of the binary data.
@param[in] value_type_offset Where the type of the value is stored.
@param[in] large true if the large storage format is used;
@param[in] depth How deep the JSON object is in the hierarchy.
*/
static bool parse_mysql_scalar_or_value(String *buffer, const uchar *data,
size_t len, size_t value_type_offset,
bool large, size_t depth)
{
/* Get the type of the value stored at the key. */
const JSONB_TYPES value_type=
static_cast<JSONB_TYPES>(data[value_type_offset]);
if (type_is_stored_inline(value_type, large))
{
const size_t value_start = value_type_offset + 1;
if (parse_mysql_scalar(buffer, value_type, data + value_start,
len - value_start))
return true;
}
else
{
/* The offset to where the value is stored is relative to the start
of the Object / Array */
const size_t value_start= read_offset_or_size(
data + value_type_offset + 1, large);
if (parse_mysql_json_value(buffer, value_type, data + value_start,
len - value_start, depth))
return true;
}
return false;
}
static bool parse_array_or_object(String *buffer, const uchar *data, size_t len,
bool handle_as_object, bool large,
size_t depth)
{
if (++depth > JSON_DOCUMENT_MAX_DEPTH)
return true;
/*
Make sure the document is long enough to contain the two length fields
(both number of elements or members, and number of bytes).
*/
const size_t offset_size= large ? LARGE_OFFSET_SIZE : SMALL_OFFSET_SIZE;
/* The length has to be at least double offset size (header). */
if (len < 2 * offset_size)
return true;
/*
Every JSON Object or Array contains two numbers in the header:
- The number of elements in the Object / Array (Keys)
- The total number of bytes occupied by the JSON Object / Array, including
the two numbers in the header.
Depending on the Object / Array type (small / large) the numbers are stored
in 2 bytes or 4 bytes each.
*/
const size_t element_count= read_offset_or_size(data, large);
const size_t bytes= read_offset_or_size(data + offset_size, large);
/* The value can't have more bytes than what's available in the buffer. */
if (bytes > len)
return true;
if (buffer->append(handle_as_object ? '{' : '['))
return true;
for (size_t i= 0; i < element_count; i++)
{
if (handle_as_object)
{
/*
The JSON Object is stored as a header part and a data part.
Header consists of:
- two length fields,
- an array of pointers to keys,
- an array of tuples (type, pointer to value).
* For certain types, the pointer to values is replaced by the actual
value. (see type_is_stored_inline)
Data consists of:
- All Key data, in order
- All Value data, in order
*/
const size_t key_offset= 2 * offset_size + i * key_size(large);
const size_t key_start= read_offset_or_size(data + key_offset, large);
/* The length of keys is always stored in 2 bytes (large == false) */
const size_t key_len= read_offset_or_size(
data + key_offset + offset_size, false);
const size_t value_type_offset=(2 * offset_size +
element_count * key_size(large) +
i * value_size(large));
/* First print the key. */
if (buffer->append('"') ||
append_string_json(buffer, data + key_start, key_len) ||
buffer->append("\": "))
{
return true;
}
/* Then print the value. */
if (parse_mysql_scalar_or_value(buffer, data, bytes, value_type_offset,
large, depth))
return true;
}
else
{
/*
Arrays do not have the keys vector and its associated data.
We jump straight to reading values.
*/
const size_t value_type_offset= 2 * offset_size + value_size(large) * i;
if (parse_mysql_scalar_or_value(buffer, data, bytes, value_type_offset,
large, depth))
return true;
}
if (i != element_count - 1 && buffer->append(", "))
return true;
}
return buffer->append(handle_as_object ? '}' : ']');
}
/**
Check the first byte of data, which holds the JSONB_TYPES enum value, and
based on it parse an object or an array, each of which can use the small
or the large representation; scalar types are dispatched directly.
@param[out] buffer Where to print the results.
@param[in] type Type of value {object, array, scalar}.
@param[in] data Raw data for parsing.
@param[in] len Length of data.
@param[in] depth Current nesting depth in the document.
*/
bool parse_mysql_json_value(String *buffer, JSONB_TYPES type, const uchar *data,
size_t len, size_t depth)
{
const bool IS_OBJECT=true, IS_LARGE=true;
switch (type) {
case JSONB_TYPE_SMALL_OBJECT:
return parse_array_or_object(buffer, data, len, IS_OBJECT, !IS_LARGE, depth);
case JSONB_TYPE_LARGE_OBJECT:
return parse_array_or_object(buffer, data, len, IS_OBJECT, IS_LARGE, depth);
case JSONB_TYPE_SMALL_ARRAY:
return parse_array_or_object(buffer, data, len, !IS_OBJECT, !IS_LARGE, depth);
case JSONB_TYPE_LARGE_ARRAY:
return parse_array_or_object(buffer, data, len, !IS_OBJECT, IS_LARGE, depth);
default:
return parse_mysql_scalar(buffer, type, data, len);
}
}
/*
Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2020 MariaDB Foundation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
#ifndef MYSQL_JSON_INCLUDED
#define MYSQL_JSON_INCLUDED
#include "my_global.h"
#include "sql_string.h" // String
enum JSONB_TYPES {
JSONB_TYPE_SMALL_OBJECT= 0x0,
JSONB_TYPE_LARGE_OBJECT= 0x1,
JSONB_TYPE_SMALL_ARRAY= 0x2,
JSONB_TYPE_LARGE_ARRAY= 0x3,
JSONB_TYPE_LITERAL= 0x4,
JSONB_TYPE_INT16= 0x5,
JSONB_TYPE_UINT16= 0x6,
JSONB_TYPE_INT32= 0x7,
JSONB_TYPE_UINT32= 0x8,
JSONB_TYPE_INT64= 0x9,
JSONB_TYPE_UINT64= 0xA,
JSONB_TYPE_DOUBLE= 0xB,
JSONB_TYPE_STRING= 0xC,
JSONB_TYPE_OPAQUE= 0xF
};
bool parse_mysql_json_value(String *buffer, JSONB_TYPES type, const uchar *data,
size_t len, size_t depth);
#endif /* MYSQL_JSON_INCLUDED */
/*
Copyright (c) 2020 MariaDB Foundation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
#include <mysql/plugin_data_type.h>
#include <my_global.h>
#include <sql_type.h>
#include <field.h>
#include <mysqld_error.h>
#include "mysql_json.h"
static const LEX_CSTRING empty_clex_str= {"", 0};
class Type_handler_mysql_json: public Type_handler_blob
{
public:
Field *make_conversion_table_field(MEM_ROOT *, TABLE *, uint, const Field *)
const override;
const Type_collection *type_collection() const override;
Field *make_table_field_from_def(TABLE_SHARE *, MEM_ROOT *,
const LEX_CSTRING *, const Record_addr &,
const Bit_addr &,
const Column_definition_attributes *,
uint32) const override;
Field *make_table_field(MEM_ROOT *, const LEX_CSTRING *,
const Record_addr &, const Type_all_attributes &,
TABLE_SHARE *) const override;
};
Type_handler_mysql_json type_handler_mysql_json;
class Field_mysql_json: public Field_blob
{
public:
Field_mysql_json(uchar *ptr_arg, uchar *null_ptr_arg,
uchar null_bit_arg, enum utype unireg_check_arg,
const LEX_CSTRING *field_name_arg, TABLE_SHARE *share,
uint blob_pack_length, const DTCollation &collation)
: Field_blob(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
field_name_arg, share, blob_pack_length,
&my_charset_utf8mb4_bin)
{}
String *val_str(String *val_buffer, String *val_str);
const Type_handler *type_handler() const { return &type_handler_mysql_json; }
bool parse_mysql(String *dest, const char *data, size_t length) const;
bool send(Protocol *protocol) { return Field::send(protocol); }
void sql_type(String &s) const
{ s.set_ascii(STRING_WITH_LEN("json /* MySQL 5.7 */")); }
/* This makes ALTER TABLE consider it different from the built-in field. */
Compression_method *compression_method() const { return (Compression_method*)1; }
};
Field *Type_handler_mysql_json::make_conversion_table_field(MEM_ROOT *root,
TABLE *table, uint metadata, const Field *target) const
{
uint pack_length= metadata & 0x00ff;
if (pack_length < 1 || pack_length > 4)
return NULL; // Broken binary log?
return new (root)
Field_mysql_json(NULL, (uchar *) "", 1, Field::NONE, &empty_clex_str,
table->s, pack_length, target->charset());
}
Field *Type_handler_mysql_json::make_table_field_from_def(TABLE_SHARE *share,
MEM_ROOT *root, const LEX_CSTRING *name,
const Record_addr &addr, const Bit_addr &bit,
const Column_definition_attributes *attr, uint32 flags) const
{
return new (root) Field_mysql_json(addr.ptr(), addr.null_ptr(),
addr.null_bit(), attr->unireg_check, name, share,
attr->pack_flag_to_pack_length(), attr->charset);
}
Field *Type_handler_mysql_json::make_table_field(MEM_ROOT *root,
const LEX_CSTRING *name, const Record_addr &addr,
const Type_all_attributes &attr, TABLE_SHARE *share) const
{
return new (root) Field_mysql_json(addr.ptr(), addr.null_ptr(),
addr.null_bit(), Field::NONE, name, share, 2, attr.collation);
}
String *Field_mysql_json::val_str(String *val_buffer, String *val_ptr)
{
String *raw_value= Field_blob::val_str(val_buffer, val_ptr);
String data;
data.copy(*raw_value);
val_ptr->length(0);
if (parse_mysql(val_ptr, data.ptr(), data.length()))
{
val_ptr->length(0);
my_printf_error(ER_UNKNOWN_ERROR,
"Error parsing MySQL JSON format, please dump this table from MySQL "
"and then restore it to be able to use it in MariaDB.", MYF(0));
}
return val_ptr;
}
bool Field_mysql_json::parse_mysql(String *dest,
const char *data, size_t length) const
{
if (!data)
return false;
/* Each JSON blob must start with a type specifier. */
if (length < 2)
return true;
if (parse_mysql_json_value(dest, static_cast<JSONB_TYPES>(data[0]),
reinterpret_cast<const uchar*>(data) + 1,
length - 1, 0))
return true;
return false;
}
class Type_collection_mysql_json: public Type_collection
{
public:
const Type_handler *aggregate_for_result(const Type_handler *a,
const Type_handler *b)
const override
{
if (a == b)
return a;
return NULL;
}
const Type_handler *aggregate_for_min_max(const Type_handler *a,
const Type_handler *b)
const override
{
return aggregate_for_result(a, b);
}
const Type_handler *aggregate_for_comparison(const Type_handler *a,
const Type_handler *b)
const override
{
return aggregate_for_result(a, b);
}
const Type_handler *aggregate_for_num_op(const Type_handler *a,
const Type_handler *b)
const override
{
return NULL;
}
const Type_handler *handler_by_name(const LEX_CSTRING &name) const override
{
if (type_handler_mysql_json.name().eq(name))
return &type_handler_mysql_json;
return NULL;
}
};
const Type_collection *Type_handler_mysql_json::type_collection() const
{
static Type_collection_mysql_json type_collection_mysql_json;
return &type_collection_mysql_json;
}
static struct st_mariadb_data_type plugin_descriptor_type_mysql_json=
{
MariaDB_DATA_TYPE_INTERFACE_VERSION,
&type_handler_mysql_json
};
maria_declare_plugin(type_mysql_json)
{
MariaDB_DATA_TYPE_PLUGIN,
&plugin_descriptor_type_mysql_json,
"MYSQL_JSON",
"Anel Husaković, Vicențiu Ciorbaru",
"Data type MYSQL_JSON",
PLUGIN_LICENSE_GPL,
0,
0,
0x0001,
NULL,
NULL,
"0.1",
MariaDB_PLUGIN_MATURITY_ALPHA
}
maria_declare_plugin_end;
......
@@ -2367,7 +2367,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
 comment_pos+= comment_length;
 }
-if ((uchar) strpos[13] == (uchar) MYSQL_TYPE_VIRTUAL)
+if ((uchar) strpos[13] == (uchar) MYSQL_TYPE_VIRTUAL
+    && likely(share->mysql_version >= 100000))
 {
 /*
 MariaDB version 10.0 version.
......
@@ -2417,7 +2418,18 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
 interval_nr= (uint) strpos[12];
 enum_field_types field_type= (enum_field_types) strpos[13];
 if (!(handler= Type_handler::get_handler_by_real_type(field_type)))
-  goto err; // Not supported field type
+{
+  if (field_type == 245 &&
+      share->mysql_version >= 50700) // a.k.a. MySQL 5.7 JSON
+  {
+    share->incompatible_version|= HA_CREATE_USED_ENGINE;
+    const LEX_CSTRING mysql_json{STRING_WITH_LEN("MYSQL_JSON")};
+    handler= Type_handler::handler_by_name_or_error(thd, mysql_json);
+  }
+  if (!handler)
+    goto err; // Not supported field type
+}
 handler= handler->type_handler_frm_unpack(strpos);
 if (handler->Column_definition_attributes_frm_unpack(&attr, share,
 strpos,
......