Fix crash in multi-insert COPY
author     Peter Eisentraut <peter_e@gmx.net>
Wed, 17 Oct 2018 18:31:20 +0000 (20:31 +0200)
committer  Peter Eisentraut <peter_e@gmx.net>
Wed, 17 Oct 2018 18:31:20 +0000 (20:31 +0200)
A bug introduced in 0d5f05cde011512e605bb2688d9b1fbb5b3ae152 caused the
*previous* partition's triggers to be considered when deciding whether
multi-insert can be used for the current partition.  Rearrange the code
so that the current partition's triggers are checked instead.

Author: Ashutosh Sharma <ashu.coek88@gmail.com>
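
The failing case is essentially the one the new regression test adds: a
partitioned table where a single partition has a BEFORE INSERT row trigger,
loaded via COPY with rows that switch between partitions.  A minimal sketch
(names follow the new test; the column list, partition bounds, and data-file
path are illustrative):

    create table parted_copytest (a int, b text) partition by range (a);
    create table parted_copytest_a1 partition of parted_copytest
      for values from (1) to (1000);
    create table parted_copytest_a2 partition of parted_copytest
      for values from (1000) to (2000);

    -- trigger function that simply passes the row through, as in the new test
    create function part_ins_func() returns trigger language plpgsql as $$
    begin
      return new;
    end;
    $$;

    create trigger part_ins_trig
      before insert on parted_copytest_a2
      for each row execute procedure part_ins_func();

    -- load rows spanning both partitions; before this fix such a COPY could crash
    copy parted_copytest from '/tmp/parted_copytest.csv';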

src/backend/commands/copy.c
src/test/regress/input/copy.source
src/test/regress/output/copy.source

diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 86b0fb300ff8fb0931cd4958ee5f8cfed9fd4b98..b58a74f4e3db9d8e8f30d35e08ca84814742faa3 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2783,21 +2783,7 @@ CopyFrom(CopyState cstate)
                        lastPartitionSampleLineNo = cstate->cur_lineno;
                        nPartitionChanges = 0;
                    }
-
-                   /*
-                    * Tests have shown that using multi-inserts when the
-                    * partition changes on every tuple slightly decreases the
-                    * performance, however, there are benefits even when only
-                    * some batches have just 2 tuples, so let's enable
-                    * multi-inserts even when the average is quite low.
-                    */
-                   leafpart_use_multi_insert = avgTuplesPerPartChange >= 1.3 &&
-                       !has_before_insert_row_trig &&
-                       !has_instead_insert_row_trig &&
-                       resultRelInfo->ri_FdwRoutine == NULL;
                }
-               else
-                   leafpart_use_multi_insert = false;
 
                /*
                 * Overwrite resultRelInfo with the corresponding partition's
@@ -2821,6 +2807,19 @@ CopyFrom(CopyState cstate)
                has_instead_insert_row_trig = (resultRelInfo->ri_TrigDesc &&
                                               resultRelInfo->ri_TrigDesc->trig_insert_instead_row);
 
+               /*
+                * Tests have shown that using multi-inserts when the
+                * partition changes on every tuple slightly decreases the
+                * performance, however, there are benefits even when only
+                * some batches have just 2 tuples, so let's enable
+                * multi-inserts even when the average is quite low.
+                */
+               leafpart_use_multi_insert = insertMethod == CIM_MULTI_CONDITIONAL &&
+                   avgTuplesPerPartChange >= 1.3 &&
+                   !has_before_insert_row_trig &&
+                   !has_instead_insert_row_trig &&
+                   resultRelInfo->ri_FdwRoutine == NULL;
+
                /*
                 * We'd better make the bulk insert mechanism gets a new
                 * buffer when the partition being inserted into changes.
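
In short, the rearrangement moves the eligibility test to after resultRelInfo
has been repointed at the partition chosen for the current tuple, so the
trigger flags and the ri_FdwRoutine check it reads now describe that partition
rather than the previously used one.  Folding insertMethod ==
CIM_MULTI_CONDITIONAL into the expression takes over the role of the deleted
else branch that used to reset leafpart_use_multi_insert.
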
diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source
index 014b1b5711dc96d257db980395ad7f2f2edab1a7..4cb03c566fabc73173e66a639113142458c6cdb9 100644
--- a/src/test/regress/input/copy.source
+++ b/src/test/regress/input/copy.source
@@ -162,4 +162,23 @@ copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
 select tableoid::regclass,count(*),sum(a) from parted_copytest
 group by tableoid order by tableoid::regclass::name;
 
+truncate parted_copytest;
+
+-- create before insert row trigger on parted_copytest_a2
+create function part_ins_func() returns trigger language plpgsql as $$
+begin
+  return new;
+end;
+$$;
+
+create trigger part_ins_trig
+   before insert on parted_copytest_a2
+   for each row
+   execute procedure part_ins_func();
+
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
+
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::name;
+
 drop table parted_copytest;
diff --git a/src/test/regress/output/copy.source b/src/test/regress/output/copy.source
index ab096153ad5ea8b26964f99afe488351d9e28233..ddd652c7128fe6d52332d7db4e42dc95ff5e2fa9 100644
--- a/src/test/regress/output/copy.source
+++ b/src/test/regress/output/copy.source
@@ -121,4 +121,24 @@ group by tableoid order by tableoid::regclass::name;
  parted_copytest_a2 |    10 |  10055
 (2 rows)
 
+truncate parted_copytest;
+-- create before insert row trigger on parted_copytest_a2
+create function part_ins_func() returns trigger language plpgsql as $$
+begin
+  return new;
+end;
+$$;
+create trigger part_ins_trig
+   before insert on parted_copytest_a2
+   for each row
+   execute procedure part_ins_func();
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::name;
+      tableoid      | count |  sum   
+--------------------+-------+--------
+ parted_copytest_a1 |  1010 | 510655
+ parted_copytest_a2 |    10 |  10055
+(2 rows)
+
 drop table parted_copytest;