I'm trying to iteratively pull data from MySQL and build a table in Amazon Redshift, but I'm getting an error on the APPEND step. Here is my code:
db = <some_db>
table_name = 'test'
table_name_full = f'{db}.{table_name}'
cols = ['settings', 'fields', 'design', 'confirmation']
chunk_name = 'test_1'
bucket = <some_bucket>
key = 'test.csv'
iam_role = <some_role>
query = f"""
DROP TABLE IF EXISTS {chunk_name};
CREATE TABLE {chunk_name} (
"id" integer, "{'" super, "'.join(col)}" super
);
COPY {chunk_name} ("id", "{'", "'.join(col)}")
FROM 's3://{bucket}/{key}'
iam_role '{iam_role}'
ignoreheader 1
CSV;
alter table {table_name_full} append from {chunk_name};
drop table {chunk_name};
"""
session = boto3.session.Session(
aws_access_key_id=<some_id>,
aws_secret_access_key=<some_key>,
aws_session_token=<some_token>
)
config = Config(connect_timeout=180, read_timeout=180)
client_redshift = session.client("redshift-data", config=config)
result = client_redshift.execute_statement(Database=db,
SecretArn=secret_arn,
Sql=query,
ClusterIdentifier=cluster_id)
n = 10
for i in range(n):
stmtid = result['Id']
response = client_redshift.describe_statement(Id=stmtid)
print(len(response) > 1)
print('RESPONSE: ', response)
print('-'*100)
time.sleep(10)
I tried adding `set autocommit on;` and `set autocommit off;` around the ALTER statement, but that only produced another error. How can I run ALTER TABLE APPEND through the Redshift Data API without hitting the transaction-block restriction?