#coding:utf-8
"""
ID: issue-6649
ISSUE: 6649
TITLE: FB crashes on attempt to create table with number of fields greater than 5460
DESCRIPTION:
  It was found that the maximal number of fields with type = BIGINT that can fit in a table DDL is 8066.
  If this limit is exceeded then FB raises "new record size of N bytes is too big" (where N >= 65536).
  We use a for-loop with two iterations, each of which does the following:
  1. Create a table with total number of fields = <N> (one for the 'ID' primary key plus 8064
     'user-data' fields with names 'F1', 'F2', ..., 'F<N-1>'). All of them have type = BIGINT.
  2. DO RECONNECT // mandatory! Otherwise the crash can not be reproduced.
  3. Run the UPDATE OR INSERT statement specified in the ticket (insert a single record with ID = 1).
  4. Run a SELECT statement which calculates the total sum over all 'user-data' fields.
  When N = 8065 these actions must complete successfully and the result of the final SELECT must be displayed.
  When N = 8066 we must get the exception:
      Statement failed, SQLSTATE = 54000
      unsuccessful metadata update
      -new record size of 65536 bytes is too big
NOTES:
  Confirmed bug on 4.0.0.2204: got crash when N=8065 (but still "new record size of 65536 bytes is too big" when N=8066).
  Checked on 3.0.7.33368, 4.0.0.2214 - all OK.
JIRA: CORE-6411
FBTEST: bugs.core_6411
"""
import pytest
from firebird.qa import *
db = db_factory()
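
# Substitutions: drop isql's "After line N ..." location messages and collapse runs
# of whitespace so actual and expected output can be compared reliably.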
act = python_act('db', substitutions=[('.*(-)?After line \\d+.*', ''), ('[ \t]+', ' ')])
expected_stdout = """
step: 0, FLD_COUNT: 8064, result: FIELDS_TOTAL 32510016
step: 1, FLD_COUNT: 8065, result: Statement failed, SQLSTATE = 54000
step: 1, FLD_COUNT: 8065, result: unsuccessful metadata update
step: 1, FLD_COUNT: 8065, result: -new record size of 65536 bytes is too big
step: 1, FLD_COUNT: 8065, result: -TABLE TDATA
"""
@pytest.mark.version('>=3.0.7')
def test_1(act: Action, capsys):
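    # Two iterations: step 0 creates a table with 8064 columns (including ID) and must
    # succeed; step 1 adds one more column and must fail with SQLSTATE = 54000.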
    for step in range(2):
        FLD_COUNT = 8064 + step
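
        # Build the DDL: one 'id' primary key column plus FLD_COUNT-1 BIGINT columns f1 .. f<FLD_COUNT-1>.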
        ddl_init = 'recreate table tdata (id bigint primary key'
        ddl_addi = '\n'.join([f',f{i} bigint' for i in range(1, FLD_COUNT)])
        ddl_expr = ''.join([ddl_init, ddl_addi, ')'])
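
        # Build the UPDATE OR INSERT statement from the ticket: a single record with ID = 1,
        # each user-data column f<i> receiving the literal value i.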
        upd_init = 'update or insert into tdata values(1'
        upd_addi = '\n'.join([f',{i}' for i in range(1, FLD_COUNT)])
        upd_expr = ''.join([upd_init, upd_addi, ') matching(id)'])
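
        # Build a SELECT whose FIELDS_TOTAL expression sums the literals 0 + 1 + ... + (FLD_COUNT - 1),
        # which equals the sum of the user-data values inserted above.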
        sel_init = 'select '
        sel_addi = '+'.join([str(i) for i in range(0, FLD_COUNT)])
        sel_expr = ''.join([sel_init, sel_addi, ' as fields_total from tdata'])
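
        # Whole scenario as a single isql script; the CONNECT in the middle is the
        # mandatory reconnect without which the crash is not reproduced (see DESCRIPTION).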
        sql_expr = f"""
            set bail on ;
            {ddl_expr} ;
            commit;
            connect '{act.db.dsn}' user {act.db.user} password '{act.db.password}' ;
            {upd_expr} ;
            set list on ;
            {sel_expr} ;
            quit ;
        """

        act.reset()
        act.isql(switches=[], input=sql_expr, combine_output=True)
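
        # Echo every non-empty output line, prefixed with the step and field count,
        # so it can be matched against expected_stdout.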
        for line in act.string_strip(act.stdout).splitlines():
            if line.strip():
                print(f'step: {step}, FLD_COUNT: {FLD_COUNT}, result: {line}')

    # Check
    act.reset()
    act.expected_stdout = expected_stdout
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout