Since Zabbix 6.0, primary keys are used for all tables in new installations.
For installations upgraded from earlier versions, this section provides instructions for manually upgrading the history tables to primary keys.
Instructions are available for the following databases:

* MySQL
* PostgreSQL
* TimescaleDB
* Oracle
#### MySQL

Export and import must be performed in tmux/screen so that the session is not dropped.
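For example, a minimal sketch (the session name is arbitrary):

tmux new-session -s zabbix_pk_upgrade    # run the export/import inside this session
tmux attach -t zabbix_pk_upgrade         # reattach later if the connection drops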
See also: [Important notes](#important-notes)
Rename the tables using `history_pk_prepare.sql`:

mysql -uzabbix -p<password> zabbix < /usr/share/doc/zabbix-sql-scripts/mysql/history_pk_prepare.sql
Install mysqlsh so that it can connect to the database. If the connection is made through a socket, its path may need to be stated explicitly.

Connect via mysqlsh:
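For example (a sketch; the socket path and user are assumptions, adjust them to your setup):

mysqlsh -uroot -S /run/mysqld/mysqld.sock --no-password -Dzabbix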
Run (adjust the CSV file path to your environment):
CSVPATH="/var/lib/mysql-files";
util.exportTable("history_old", CSVPATH + "/history.csv", { dialect: "csv" });
util.importTable(CSVPATH + "/history.csv", {"dialect": "csv", "table": "history" });
util.exportTable("history_uint_old", CSVPATH + "/history_uint.csv", { dialect: "csv" });
util.importTable(CSVPATH + "/history_uint.csv", {"dialect": "csv", "table": "history_uint" });
util.exportTable("history_str_old", CSVPATH + "/history_str.csv", { dialect: "csv" });
util.importTable(CSVPATH + "/history_str.csv", {"dialect": "csv", "table": "history_str" });
util.exportTable("history_log_old", CSVPATH + "/history_log.csv", { dialect: "csv" });
util.importTable(CSVPATH + "/history_log.csv", {"dialect": "csv", "table": "history_log" });
util.exportTable("history_text_old", CSVPATH + "/history_text.csv", { dialect: "csv" });
util.importTable(CSVPATH + "/history_text.csv", {"dialect": "csv", "table": "history_text" });
Once the data has been verified, drop the old tables:
DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;
Use the following steps only if mysqlsh cannot be used, as this method is slower and considerably more time-consuming.

Log in to the MySQL console as root (recommended) or as any user with FILE privileges.

MySQL must be started with the local_infile variable enabled.
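For example (a sketch; local_infile can also be set persistently under the [mysqld] section of the server configuration file):

mysql> SET GLOBAL local_infile = 1;
mysql> SHOW GLOBAL VARIABLES LIKE 'local_infile';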
Rename the tables using `history_pk_prepare.sql`:

mysql -uzabbix -p<password> zabbix < /usr/share/doc/zabbix-sql-scripts/mysql/history_pk_prepare.sql
Check whether import/export is enabled only for files in a specific path:
mysql> SELECT @@secure_file_priv;
+-----------------------+
| @@secure_file_priv    |
+-----------------------+
| /var/lib/mysql-files/ |
+-----------------------+
If the value is a path to a directory, export/import can be performed for files in that directory; in that case, edit the file paths in the queries below according to your setup. Alternatively, secure_file_priv can be disabled (set to an empty string) for the duration of the upgrade. If the value is empty, export/import can be performed on files in any location.
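For example, to disable it for the upgrade (a sketch; secure_file_priv is read-only at runtime, so it must be set in the server configuration and the server restarted; the drop-in file name is hypothetical):

# /etc/mysql/conf.d/zabbix-pk-upgrade.cnf
[mysqld]
secure_file_priv = ""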
max_execution_time must be disabled before exporting data to avoid timeouts on long-running exports:
SET @@max_execution_time=0;
SELECT * INTO OUTFILE '/var/lib/mysql-files/history.csv' FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n' FROM history_old;
LOAD DATA INFILE '/var/lib/mysql-files/history.csv' IGNORE INTO TABLE history FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n';
SELECT * INTO OUTFILE '/var/lib/mysql-files/history_uint.csv' FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n' FROM history_uint_old;
LOAD DATA INFILE '/var/lib/mysql-files/history_uint.csv' IGNORE INTO TABLE history_uint FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n';
SELECT * INTO OUTFILE '/var/lib/mysql-files/history_str.csv' FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n' FROM history_str_old;
LOAD DATA INFILE '/var/lib/mysql-files/history_str.csv' IGNORE INTO TABLE history_str FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n';
SELECT * INTO OUTFILE '/var/lib/mysql-files/history_log.csv' FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n' FROM history_log_old;
LOAD DATA INFILE '/var/lib/mysql-files/history_log.csv' IGNORE INTO TABLE history_log FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n';
SELECT * INTO OUTFILE '/var/lib/mysql-files/history_text.csv' FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n' FROM history_text_old;
LOAD DATA INFILE '/var/lib/mysql-files/history_text.csv' IGNORE INTO TABLE history_text FIELDS TERMINATED BY ',' ESCAPED BY '"' LINES TERMINATED BY '\n';
Verify that everything works correctly (e.g., with the same row-count comparison as shown above), then drop the old tables:
DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;
#### PostgreSQL

Export and import must be performed in tmux/screen so that the session is not dropped.

See also: [Important notes](#important-notes)

Rename the tables using `history_pk_prepare.sql`:

sudo -u zabbix psql zabbix < /usr/share/doc/zabbix-sql-scripts/postgresql/history_pk_prepare.sql
\copy history_old TO '/tmp/history.csv' DELIMITER ',' CSV
CREATE TEMP TABLE temp_history (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value DOUBLE PRECISION DEFAULT '0.0000' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);
\copy temp_history FROM '/tmp/history.csv' DELIMITER ',' CSV
INSERT INTO history SELECT * FROM temp_history ON CONFLICT (itemid,clock,ns) DO NOTHING;
\copy history_uint_old TO '/tmp/history_uint.csv' DELIMITER ',' CSV
CREATE TEMP TABLE temp_history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);
\copy temp_history_uint FROM '/tmp/history_uint.csv' DELIMITER ',' CSV
INSERT INTO history_uint SELECT * FROM temp_history_uint ON CONFLICT (itemid,clock,ns) DO NOTHING;
\copy history_str_old TO '/tmp/history_str.csv' DELIMITER ',' CSV
CREATE TEMP TABLE temp_history_str (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value varchar(255) DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);
\copy temp_history_str FROM '/tmp/history_str.csv' DELIMITER ',' CSV
INSERT INTO history_str (itemid,clock,value,ns) SELECT * FROM temp_history_str ON CONFLICT (itemid,clock,ns) DO NOTHING;
\copy history_log_old TO '/tmp/history_log.csv' DELIMITER ',' CSV
CREATE TEMP TABLE temp_history_log (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
timestamp integer DEFAULT '0' NOT NULL,
source varchar(64) DEFAULT '' NOT NULL,
severity integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
logeventid integer DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);
\copy temp_history_log FROM '/tmp/history_log.csv' DELIMITER ',' CSV
INSERT INTO history_log SELECT * FROM temp_history_log ON CONFLICT (itemid,clock,ns) DO NOTHING;
\copy history_text_old TO '/tmp/history_text.csv' DELIMITER ',' CSV
CREATE TEMP TABLE temp_history_text (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);
\copy temp_history_text FROM '/tmp/history_text.csv' DELIMITER ',' CSV
INSERT INTO history_text SELECT * FROM temp_history_text ON CONFLICT (itemid,clock,ns) DO NOTHING;
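As a sanity check before dropping anything, old and new row counts can be compared (a sketch; counts may differ slightly if duplicate rows were discarded by ON CONFLICT ... DO NOTHING):

SELECT (SELECT count(*) FROM history) AS new_rows, (SELECT count(*) FROM history_old) AS old_rows;
-- repeat for history_uint, history_str, history_log and history_text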
Verify that everything works correctly, then drop the old tables:
DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;
Consider using the following tips to improve insert performance: [improving PostgreSQL insert performance](https://blog.timescale.com/blog/13-tips-to-improve-postgresql-insert-performance/).
#### TimescaleDB v1.x
Export and import must be performed in tmux/screen so that the session is not dropped.

See also: [Important notes](#important-notes)
##### Upgrading the tables

* Rename the tables using `history_pk_prepare.sql`:
sudo -u zabbix psql zabbix < /usr/share/doc/zabbix-sql-scripts/postgresql/history_pk_prepare.sql
* Example of upgrading **one** table:
-- Verify that there is enough free space to export the uncompressed data
select sum(before_compression_total_bytes)/1024/1024 as before_compression_total_mbytes, sum(after_compression_total_bytes)/1024/1024 as after_compression_total_mbytes FROM chunk_compression_stats('history_uint_old');

-- Export data
\copy (select * from history_uint_old) TO '/tmp/history_uint.csv' DELIMITER ',' CSV

CREATE TEMP TABLE temp_history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);

-- Import data
\copy temp_history_uint FROM '/tmp/history_uint.csv' DELIMITER ',' CSV

-- Create the hypertable and insert data into it
select create_hypertable('history_uint', 'clock', chunk_time_interval => 86400, migrate_data => true);
INSERT INTO history_uint SELECT * FROM temp_history_uint ON CONFLICT (itemid,clock,ns) DO NOTHING;

-- Enable compression
select set_integer_now_func('history_uint', 'zbx_ts_unix_now', true);
alter table history_uint set (timescaledb.compress,timescaledb.compress_segmentby='itemid',timescaledb.compress_orderby='clock,ns');

-- Pass the JOB_ID returned by this query to run_job
select add_compress_chunks_policy('history_uint', (
select (p.older_than).integer_interval from _timescaledb_config.bgw_policy_compress_chunks p
inner join _timescaledb_catalog.hypertable h on (h.id=p.hypertable_id) where h.table_name='history_uint'
)::integer
);

select alter_job((select job_id from timescaledb_information.jobs where hypertable_schema='public' and hypertable_name='history_uint'), scheduled => true);

-- Run compression
call run_job(<JOB_ID>);
-- May show 'NOTICE: no chunks for hypertable public.history_uint that satisfy compress chunk policy'; this is fine.
* Verify that everything works correctly.

* Drop the old tables:

DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;
See also: [improving PostgreSQL insert performance](https://blog.timescale.com/blog/13-tips-to-improve-postgresql-insert-performance/)
#### TimescaleDB v2.x

Export and import must be performed in tmux/screen so that the session is not dropped.

See also: [Important notes](#important-notes)

Rename the tables using `history_pk_prepare.sql`:

sudo -u zabbix psql zabbix < /usr/share/doc/zabbix-sql-scripts/postgresql/history_pk_prepare.sql
-- Verify that there is enough free space to export the uncompressed data
select sum(before_compression_total_bytes)/1024/1024 as before_compression_total_mbytes, sum(after_compression_total_bytes)/1024/1024 as after_compression_total_mbytes FROM chunk_compression_stats('history_uint_old');
-- Export data
\copy (select * from history_uint_old) TO '/tmp/history_uint.csv' DELIMITER ',' CSV
CREATE TEMP TABLE temp_history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL
);
-- Import data
\copy temp_history_uint FROM '/tmp/history_uint.csv' DELIMITER ',' CSV
-- Create the hypertable and insert data into it
select create_hypertable('history_uint', 'clock', chunk_time_interval => 86400, migrate_data => true);
INSERT INTO history_uint SELECT * FROM temp_history_uint ON CONFLICT (itemid,clock,ns) DO NOTHING;
-- Enable compression
select set_integer_now_func('history_uint', 'zbx_ts_unix_now', true);
alter table history_uint set (timescaledb.compress,timescaledb.compress_segmentby='itemid',timescaledb.compress_orderby='clock,ns');
-- Substitute your schema in hypertable_schema
-- Pass the JOB_ID returned by this query to run_job
select add_compression_policy('history_uint', (
select extract(epoch from (config::json->>'compress_after')::interval) from timescaledb_information.jobs where application_name like 'Compression%%' and hypertable_schema='public' and hypertable_name='history_uint_old'
)::integer
);
select alter_job((select job_id from timescaledb_information.jobs where hypertable_schema='public' and hypertable_name='history_uint'), scheduled => true);
-- Run compression
call run_job(<JOB_ID>);
-- May show 'NOTICE: no chunks for hypertable public.history_uint that satisfy compress chunk policy'; this is fine.
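As an optional sanity check (a sketch using the TimescaleDB 2.x information views), confirm that the new hypertable exists and has compression enabled before dropping the old tables:

select hypertable_name, compression_enabled from timescaledb_information.hypertables where hypertable_name='history_uint';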
Verify that everything works correctly, then drop the old tables:
DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;
See also: [improving PostgreSQL insert performance](https://blog.timescale.com/blog/13-tips-to-improve-postgresql-insert-performance/)
#### Oracle

Export and import must be performed in tmux/screen so that the session is not dropped.

See also: [Important notes](#important-notes)

When using Oracle Data Pump, consider also its performance tips.

##### Upgrading all tables in a single Data Pump run

Rename the tables using `history_pk_prepare.sql`:

shell> cd /path/to/zabbix-sources/database/oracle
shell> sqlplus zabbix/password@oracle_host/ORCL
sqlplus> @history_pk_prepare.sql
Example:
# mkdir -pv /export/history
# chown -R oracle:oracle /export
create directory history as '/export/history';
grant read,write on directory history to zabbix;
expdp zabbix/password@127.0.0.1:1521/z \
DIRECTORY=history \
TABLES=history_old,history_uint_old,history_str_old,history_log_old,history_text_old \
PARALLEL=N
impdp zabbix/password@127.0.0.1:1521/z \
DIRECTORY=history \
TABLES=history_uint_old \
REMAP_TABLE=history_old:history,history_uint_old:history_uint,history_str_old:history_str,history_log_old:history_log,history_text_old:history_text \
data_options=SKIP_CONSTRAINT_ERRORS table_exists_action=APPEND PARALLEL=N CONTENT=data_only
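Before dropping anything, it may be worth reviewing the Data Pump log for the number of rows imported and any skipped constraint errors (import.log is the default log name, written to the directory backing the DIRECTORY object):

# less /export/history/import.log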
Verify that everything works correctly, then drop the old tables:
DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;
##### Upgrading each table in a separate Data Pump run

When using Oracle Data Pump, consider also its performance tips.

Rename the tables using `history_pk_prepare.sql`:
shell> cd /path/to/zabbix-sources/database/oracle
shell> sqlplus zabbix/password@oracle_host/ORCL
sqlplus> @history_pk_prepare.sql
Example:
# mkdir -pv /export/history /export/history_uint /export/history_str /export/history_log /export/history_text
# chown -R oracle:oracle /export
create directory history as '/export/history';
grant read,write on directory history to zabbix;
create directory history_uint as '/export/history_uint';
grant read,write on directory history_uint to zabbix;
create directory history_str as '/export/history_str';
grant read,write on directory history_str to zabbix;
create directory history_log as '/export/history_log';
grant read,write on directory history_log to zabbix;
create directory history_text as '/export/history_text';
grant read,write on directory history_text to zabbix;
expdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history TABLES=history_old PARALLEL=N
impdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history TABLES=history_old REMAP_TABLE=history_old:history data_options=SKIP_CONSTRAINT_ERRORS table_exists_action=APPEND PARALLEL=N CONTENT=data_only
expdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_uint TABLES=history_uint_old PARALLEL=N
impdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_uint TABLES=history_uint_old REMAP_TABLE=history_uint_old:history_uint data_options=SKIP_CONSTRAINT_ERRORS table_exists_action=APPEND PARALLEL=N CONTENT=data_only
expdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_str TABLES=history_str_old PARALLEL=N
impdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_str TABLES=history_str_old REMAP_TABLE=history_str_old:history_str data_options=SKIP_CONSTRAINT_ERRORS table_exists_action=APPEND PARALLEL=N CONTENT=data_only
expdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_log TABLES=history_log_old PARALLEL=N
impdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_log TABLES=history_log_old REMAP_TABLE=history_log_old:history_log data_options=SKIP_CONSTRAINT_ERRORS table_exists_action=APPEND PARALLEL=N CONTENT=data_only
expdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_text TABLES=history_text_old PARALLEL=N
impdp zabbix/password@127.0.0.1:1521/xe DIRECTORY=history_text TABLES=history_text_old REMAP_TABLE=history_text_old:history_text data_options=SKIP_CONSTRAINT_ERRORS table_exists_action=APPEND PARALLEL=N CONTENT=data_only
Verify that everything works correctly, then drop the old tables:
DROP TABLE history_old;
DROP TABLE history_uint_old;
DROP TABLE history_str_old;
DROP TABLE history_log_old;
DROP TABLE history_text_old;