VISHORIZON_TEMP
} GlobalVisHorizonKind;
+/*
+ * Reason codes for KnownAssignedXidsCompress().
+ */
+typedef enum KAXCompressReason
+{
+ KAX_NO_SPACE, /* need to free up space at array end */
+ KAX_PRUNE, /* we just pruned old entries */
+ KAX_TRANSACTION_END, /* we just committed/removed some XIDs */
+ KAX_STARTUP_PROCESS_IDLE /* startup process is about to sleep */
+} KAXCompressReason;
+
static ProcArrayStruct *procArray;
#endif /* XIDCACHE_DEBUG */
/* Primitives for KnownAssignedXids array handling for standby */
-static void KnownAssignedXidsCompress(bool force);
+static void KnownAssignedXidsCompress(KAXCompressReason reason, bool haveLock);
static void KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
bool exclusive_lock);
static bool KnownAssignedXidsSearch(TransactionId xid, bool remove);
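As an illustration of the new calling convention (not part of the patch; the helper name is hypothetical): a removal path that already holds ProcArrayLock in exclusive mode passes haveLock = true, while a lock-free caller passes false and lets KnownAssignedXidsCompress() take the lock itself only if it decides to compress.

static void
example_remove_one_xid(TransactionId xid)	/* hypothetical helper */
{
	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
	(void) KnownAssignedXidsSearch(xid, true);	/* remove = true */
	/* we already hold the lock, so haveLock = true */
	KnownAssignedXidsCompress(KAX_TRANSACTION_END, true);
	LWLockRelease(ProcArrayLock);
}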
LWLockRelease(ProcArrayLock);
}
+/*
+ * KnownAssignedTransactionIdsIdleMaintenance
+ * Opportunistically do maintenance work when the startup process
+ * is about to go idle.
+ */
+void
+KnownAssignedTransactionIdsIdleMaintenance(void)
+{
+ KnownAssignedXidsCompress(KAX_STARTUP_PROCESS_IDLE, false);
+}
+
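A sketch of how the startup process might call the new hook from its WAL-wait loop; only KnownAssignedTransactionIdsIdleMaintenance() comes from this patch, while the surrounding wait logic and timeout are illustrative.

	/* Startup process, about to sleep waiting for more WAL to arrive */
	KnownAssignedTransactionIdsIdleMaintenance();

	(void) WaitLatch(MyLatch,
					 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
					 5000L,		/* illustrative timeout */
					 WAIT_EVENT_RECOVERY_WAL_STREAM);
	ResetLatch(MyLatch);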
/*
* Private module functions to manipulate KnownAssignedXids
* so there is an optimal point for any workload mix. We use a heuristic to
* decide when to compress the array, though trimming also helps reduce
* frequency of compressing. The heuristic requires us to track the number of
- * currently valid XIDs in the array.
+ * currently valid XIDs in the array (N). Except in special cases, we'll
+ * compress when S >= 2N. Bounding S at 2N in turn bounds the time for
+ * taking a snapshot to be O(N), which it would have to be anyway.
*/
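In code terms, the heuristic described above reduces to the predicate below (a sketch, not part of the patch; S and N are the names from the comment, pArray as in KnownAssignedXidsCompress()).

	int		S = pArray->headKnownAssignedXids - pArray->tailKnownAssignedXids;
	int		N = pArray->numKnownAssignedXids;
	bool	compress_now = (S >= 2 * N);	/* at most half of the spread is still valid */

For example, with tail = 100, head = 700 and 250 valid XIDs, S = 600 >= 2 * 250, so a KAX_TRANSACTION_END call would compress; afterwards S == N == 250 and any snapshot scan touches at most 2N entries until the next compression.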
* Compress KnownAssignedXids by shifting valid data down to the start of the
* array, removing any gaps.
*
- * A compression step is forced if "force" is true, otherwise we do it
- * only if a heuristic indicates it's a good time to do it.
+ * A compression step is forced if "reason" is KAX_NO_SPACE, otherwise
+ * we do it only if a heuristic indicates it's a good time to do it.
*
- * Caller must hold ProcArrayLock in exclusive mode.
+ * Compression requires holding ProcArrayLock in exclusive mode.
+ * Caller must pass haveLock = true if it already holds the lock.
*/
static void
-KnownAssignedXidsCompress(bool force)
+KnownAssignedXidsCompress(KAXCompressReason reason, bool haveLock)
{
ProcArrayStruct *pArray = procArray;
int head,
- tail;
+ tail,
+ nelements;
int compress_index;
int i;
- /* no spinlock required since we hold ProcArrayLock exclusively */
+ /* Counters for compression heuristics */
+ static unsigned int transactionEndsCounter;
+ static TimestampTz lastCompressTs;
+
+ /* Tuning constants */
+#define KAX_COMPRESS_FREQUENCY 128 /* in transactions */
+#define KAX_COMPRESS_IDLE_INTERVAL 1000 /* in ms */
+
+ /*
+ * Since only the startup process modifies the head/tail pointers, we
+ * don't need a lock to read them here.
+ */
head = pArray->headKnownAssignedXids;
tail = pArray->tailKnownAssignedXids;
+ nelements = head - tail;
- if (!force)
+ /*
+ * If we can choose whether to compress, use a heuristic to avoid
+ * compressing too often or not often enough. "Compress" here simply
+ * means moving the values to the beginning of the array, so it is not as
+ * complex or costly as typical data compression algorithms.
+ */
+ if (nelements == pArray->numKnownAssignedXids)
{
/*
- * If we can choose how much to compress, use a heuristic to avoid
- * compressing too often or not often enough.
- *
- * Heuristic is if we have a large enough current spread and less than
- * 50% of the elements are currently in use, then compress. This
- * should ensure we compress fairly infrequently. We could compress
- * less often though the virtual array would spread out more and
- * snapshots would become more expensive.
+ * When there are no gaps between head and tail, don't bother to
+ * compress, except in the KAX_NO_SPACE case where we must compress to
+ * create some space after the head.
+ */
+ if (reason != KAX_NO_SPACE)
+ return;
+ }
+ else if (reason == KAX_TRANSACTION_END)
+ {
+ /*
+ * Consider compressing only once every so many commits. Frequency
+ * determined by benchmarks.
*/
- int nelements = head - tail;
+ if ((transactionEndsCounter++) % KAX_COMPRESS_FREQUENCY != 0)
+ return;
- if (nelements < 4 * PROCARRAY_MAXPROCS ||
- nelements < 2 * pArray->numKnownAssignedXids)
+ /*
+ * Furthermore, compress only if fewer than 50% of the entries between
+ * tail and head are still valid, i.e. S >= 2N (see comments above).
+ */
+ if (nelements < 2 * pArray->numKnownAssignedXids)
return;
}
+ else if (reason == KAX_STARTUP_PROCESS_IDLE)
+ {
+ /*
+ * We're about to go idle for lack of new WAL, so we might as well
+ * compress. But not too often, to avoid ProcArray lock contention
+ * with readers.
+ */
+ if (lastCompressTs != 0)
+ {
+ TimestampTz compress_after;
+
+ compress_after = TimestampTzPlusMilliseconds(lastCompressTs,
+ KAX_COMPRESS_IDLE_INTERVAL);
+ if (GetCurrentTimestamp() < compress_after)
+ return;
+ }
+ }
+
+ /* Need to compress, so get the lock if we don't have it. */
+ if (!haveLock)
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
/*
* We compress the array by reading the valid values from tail to head,
compress_index++;
}
}
+ Assert(compress_index == pArray->numKnownAssignedXids);
pArray->tailKnownAssignedXids = 0;
pArray->headKnownAssignedXids = compress_index;
+
+ if (!haveLock)
+ LWLockRelease(ProcArrayLock);
+
+ /* Update timestamp for maintenance. No need to hold lock for this. */
+ lastCompressTs = GetCurrentTimestamp();
}
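The idle-path throttle in the function above follows a simple timestamp pattern; distilled into a standalone predicate (hypothetical helper, reusing KAX_COMPRESS_IDLE_INTERVAL from the patch):

static bool
idle_compress_due(TimestampTz last_compress_ts)	/* hypothetical helper */
{
	/* compress if we never have, or if the idle interval has elapsed */
	return last_compress_ts == 0 ||
		GetCurrentTimestamp() >= TimestampTzPlusMilliseconds(last_compress_ts,
															 KAX_COMPRESS_IDLE_INTERVAL);
}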
/*
*/
if (head + nxids > pArray->maxKnownAssignedXids)
{
- /* must hold lock to compress */
- if (!exclusive_lock)
- LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
-
- KnownAssignedXidsCompress(true);
+ KnownAssignedXidsCompress(KAX_NO_SPACE, exclusive_lock);
head = pArray->headKnownAssignedXids;
/* note: we no longer care about the tail pointer */
- if (!exclusive_lock)
- LWLockRelease(ProcArrayLock);
-
/*
* If it still won't fit then we're out of memory
*/
KnownAssignedXidsRemove(subxids[i]);
/* Opportunistically compress the array */
- KnownAssignedXidsCompress(false);
+ KnownAssignedXidsCompress(KAX_TRANSACTION_END, true);
}
/*
}
/* Opportunistically compress the array */
- KnownAssignedXidsCompress(false);
+ KnownAssignedXidsCompress(KAX_PRUNE, true);
}
/*