author     Tom Lane <tgl@sss.pgh.pa.us>    2016-03-12 16:05:10 -0500
committer  Tom Lane <tgl@sss.pgh.pa.us>    2016-03-12 16:05:29 -0500
commit     23a27b039d94ba359286694831eafe03cd970eef (patch)
tree       4b1d6acb2fe4ae5c1275292adb3f027f2e052ea8 /src/pl/plpython/plpy_spi.c
parent     e01157500f26342bf4f067a4eb1e45ab9a3cd410 (diff)
Widen query numbers-of-tuples-processed counters to uint64.
This patch widens SPI_processed, EState's es_processed field, PortalData's portalPos field, FuncCallContext's call_cntr and max_calls fields, ExecutorRun's count argument, PortalRunFetch's result, and the max number of rows in a SPITupleTable to uint64, and deals with (I hope) all the ensuing fallout. Some of these values were declared uint32 before, and others "long".

I also removed PortalData's posOverflow field, since that logic seems pretty useless given that portalPos is now always 64 bits.

The user-visible results are that command tags for SELECT etc will correctly report tuple counts larger than 4G, as will plpgsql's GET DIAGNOSTICS ... ROW_COUNT command. Queries processing more tuples than that are still not exactly the norm, but they're becoming more common.

Most values associated with FETCH/MOVE distances, such as PortalRun's count argument and the count argument of most SPI functions that have one, remain declared as "long". It's not clear whether it would be worth promoting those to int64; but it would definitely be a large dollop of additional API churn on top of this, and it would only help 32-bit platforms, which seem relatively less likely to see any benefit.

Andreas Scherbaum, reviewed by Christian Ullrich, additional hacking by me
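For context on why the widening matters: a 32-bit counter silently wraps once more than 2^32 - 1 tuples have been processed, so any count built from it understates the real number. The standalone sketch below is illustrative only; it uses the <inttypes.h>/<stdint.h> types rather than PostgreSQL's own uint64 typedef and UINT64_FORMAT macro.

/* Illustrative only: why a 32-bit tuples-processed counter is not enough. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t	processed = UINT64_C(5000000000);	/* > 4G tuples */
	uint32_t	old_counter = (uint32_t) processed;	/* the pre-patch width */

	/* The 32-bit value has wrapped; the 64-bit value is exact. */
	printf("uint32 counter: %" PRIu32 "\n", old_counter);	/* 705032704 */
	printf("uint64 counter: %" PRIu64 "\n", processed);		/* 5000000000 */
	return 0;
}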
Diffstat (limited to 'src/pl/plpython/plpy_spi.c')
-rw-r--r--  src/pl/plpython/plpy_spi.c  28
1 file changed, 23 insertions, 5 deletions
diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c
index 58e78ecebcb..7d84629f48f 100644
--- a/src/pl/plpython/plpy_spi.c
+++ b/src/pl/plpython/plpy_spi.c
@@ -6,6 +6,8 @@
#include "postgres.h"
+#include <limits.h>
+
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/pg_type.h"
@@ -29,7 +31,8 @@
static PyObject *PLy_spi_execute_query(char *query, long limit);
static PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
-static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status);
+static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
+ uint64 rows, int status);
static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
@@ -382,7 +385,7 @@ PLy_spi_execute_query(char *query, long limit)
}
static PyObject *
-PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
+PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status)
{
PLyResultObject *result;
volatile MemoryContext oldcontext;
@@ -394,16 +397,19 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
if (status > 0 && tuptable == NULL)
{
Py_DECREF(result->nrows);
- result->nrows = PyInt_FromLong(rows);
+ result->nrows = (rows > (uint64) LONG_MAX) ?
+ PyFloat_FromDouble((double) rows) :
+ PyInt_FromLong((long) rows);
}
else if (status > 0 && tuptable != NULL)
{
PLyTypeInfo args;
- int i;
MemoryContext cxt;
Py_DECREF(result->nrows);
- result->nrows = PyInt_FromLong(rows);
+ result->nrows = (rows > (uint64) LONG_MAX) ?
+ PyFloat_FromDouble((double) rows) :
+ PyInt_FromLong((long) rows);
cxt = AllocSetContextCreate(CurrentMemoryContext,
"PL/Python temp context",
@@ -419,6 +425,18 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
if (rows)
{
+ uint64 i;
+
+ /*
+ * PyList_New() and PyList_SetItem() use Py_ssize_t for list
+ * size and list indices; so we cannot support a result larger
+ * than PY_SSIZE_T_MAX.
+ */
+ if (rows > (uint64) PY_SSIZE_T_MAX)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("query result has too many rows to fit in a Python list")));
+
Py_DECREF(result->rows);
result->rows = PyList_New(rows);
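
The conversion added above appears twice in the patched function: once for the case with no tuple table (utility-style results) and once for the case with a result set. A condensed sketch of that pattern is shown below. It is illustrative only, not code from the patch; rowcount_to_python is a hypothetical helper name, and it is written against the Python 2 C API (PyInt_FromLong) that PL/Python targeted at the time.

/*
 * Hypothetical helper, sketching the conversion the hunks above repeat:
 * counts that fit in a C long become a Python int; larger counts fall
 * back to an approximate float, since PyInt_FromLong() cannot hold them.
 * Requires the Python 2 development headers.
 */
#include <Python.h>

#include <limits.h>
#include <stdint.h>

static PyObject *
rowcount_to_python(uint64_t rows)
{
	if (rows > (uint64_t) LONG_MAX)
		return PyFloat_FromDouble((double) rows);
	return PyInt_FromLong((long) rows);
}

Note the asymmetry in the patch: for the nrows attribute, an oversized count degrades gracefully to an approximate float, but when building the rows list the new PY_SSIZE_T_MAX check must raise an error, because PyList_New() and PyList_SetItem() take Py_ssize_t and simply cannot represent a longer list.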