portalmem.c

/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *      backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *      $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.113.2.3 2010/02/18 03:06:53 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/portalcmds.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"

/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER         16


/* ----------------
 *          Global state
 * ----------------
 */

#define MAX_PORTALNAME_LEN          NAMEDATALEN

typedef struct portalhashent
{
      char        portalname[MAX_PORTALNAME_LEN];
      Portal            portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
do { \
      PortalHashEnt *hentry; \
      \
      hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                                               (NAME), HASH_FIND, NULL); \
      if (hentry) \
            PORTAL = hentry->portal; \
      else \
            PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL, NAME) \
do { \
      PortalHashEnt *hentry; bool found; \
      \
      hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                                               (NAME), HASH_ENTER, &found); \
      if (found) \
            elog(ERROR, "duplicate portal name"); \
      hentry->portal = PORTAL; \
      /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
      PORTAL->name = hentry->portalname; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
do { \
      PortalHashEnt *hentry; \
      \
      hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                                               PORTAL->name, HASH_REMOVE, NULL); \
      if (hentry == NULL) \
            elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

static MemoryContext PortalMemory = NULL;


/* ----------------------------------------------------------------
 *                         public portal interface functions
 * ----------------------------------------------------------------
 */

/*
 * EnablePortalManager
 *          Enables the portal management module at backend startup.
 */
void
EnablePortalManager(void)
{
      HASHCTL           ctl;

      Assert(PortalMemory == NULL);

      PortalMemory = AllocSetContextCreate(TopMemoryContext,
                                                             "PortalMemory",
                                                             ALLOCSET_DEFAULT_MINSIZE,
                                                             ALLOCSET_DEFAULT_INITSIZE,
                                                             ALLOCSET_DEFAULT_MAXSIZE);

      ctl.keysize = MAX_PORTALNAME_LEN;
      ctl.entrysize = sizeof(PortalHashEnt);

      /*
       * use PORTALS_PER_USER as a guess of how many hash table entries to
       * create, initially
       */
      PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
                                                  &ctl, HASH_ELEM);
}
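
/*
 * Usage sketch (illustrative): the portal manager must be enabled once per
 * backend before any portal can be created, e.g.
 *
 *      EnablePortalManager();
 *      ... CreatePortal(), CreateNewPortal(), etc. may now be used ...
 *
 * In this release the real call is made during backend initialization.
 */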

/*
 * GetPortalByName
 *          Returns a portal given a portal name, or NULL if name not found.
 */
Portal
GetPortalByName(const char *name)
{
      Portal            portal;

      if (PointerIsValid(name))
            PortalHashTableLookup(name, portal);
      else
            portal = NULL;

      return portal;
}

/*
 * PortalListGetPrimaryStmt
 *          Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.      Neither of these
 * cases should occur in present usages of this function.
 *
 * Copes if given a list of Querys --- can't happen in a portal, but this
 * code also supports plancache.c, which needs both cases.
 *
 * Note: the reason this is just handed a List is so that plancache.c
 * can share the code.  For use with a portal, use PortalGetPrimaryStmt
 * rather than calling this directly.
 */
Node *
PortalListGetPrimaryStmt(List *stmts)
{
      ListCell   *lc;

      foreach(lc, stmts)
      {
            Node     *stmt = (Node *) lfirst(lc);

            if (IsA(stmt, PlannedStmt))
            {
                  if (((PlannedStmt *) stmt)->canSetTag)
                        return stmt;
            }
            else if (IsA(stmt, Query))
            {
                  if (((Query *) stmt)->canSetTag)
                        return stmt;
            }
            else
            {
                  /* Utility stmts are assumed canSetTag if they're the only stmt */
                  if (list_length(stmts) == 1)
                        return stmt;
            }
      }
      return NULL;
}
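
/*
 * Usage sketch (illustrative): code that holds a Portal normally reaches
 * this function through the PortalGetPrimaryStmt() macro in utils/portal.h
 * rather than passing a List directly, e.g.
 *
 *      Node   *stmt = PortalGetPrimaryStmt(portal);
 *
 * The result may be a PlannedStmt, a utility statement, or NULL, so callers
 * typically check the node tag (IsA) before using it.
 */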

/*
 * CreatePortal
 *          Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
      Portal            portal;

      AssertArg(PointerIsValid(name));

      portal = GetPortalByName(name);
      if (PortalIsValid(portal))
      {
            if (!allowDup)
                  ereport(ERROR,
                              (errcode(ERRCODE_DUPLICATE_CURSOR),
                               errmsg("cursor \"%s\" already exists", name)));
            if (!dupSilent)
                  ereport(WARNING,
                              (errcode(ERRCODE_DUPLICATE_CURSOR),
                               errmsg("closing existing cursor \"%s\"",
                                          name)));
            PortalDrop(portal, false);
      }

      /* make new portal structure */
      portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);

      /* initialize portal heap context; typically it won't store much */
      portal->heap = AllocSetContextCreate(PortalMemory,
                                                             "PortalHeapMemory",
                                                             ALLOCSET_SMALL_MINSIZE,
                                                             ALLOCSET_SMALL_INITSIZE,
                                                             ALLOCSET_SMALL_MAXSIZE);

      /* create a resource owner for the portal */
      portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
                                                               "Portal");

      /* initialize portal fields that don't start off zero */
      portal->status = PORTAL_NEW;
      portal->cleanup = PortalCleanup;
      portal->createSubid = GetCurrentSubTransactionId();
      portal->strategy = PORTAL_MULTI_QUERY;
      portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
      portal->atStart = true;
      portal->atEnd = true;         /* disallow fetches until query is set */
      portal->visible = true;
      portal->creation_time = GetCurrentStatementStartTimestamp();

      /* put portal in table (sets portal->name) */
      PortalHashTableInsert(portal, name);

      return portal;
}
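
/*
 * Usage sketches (illustrative; the variable names are assumptions):
 *
 *      A named cursor, erroring out if the name is already in use:
 *            portal = CreatePortal(cursor_name, false, false);
 *
 *      The unnamed portal, silently replacing any previous one:
 *            portal = CreatePortal("", true, true);
 *
 * The new portal is in PORTAL_NEW state; it still needs PortalDefineQuery()
 * and PortalStart() (see pquery.c) before it can be executed.
 */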

/*
 * CreateNewPortal
 *          Create a new portal, assigning it a random nonconflicting name.
 */
Portal
CreateNewPortal(void)
{
      static unsigned int unnamed_portal_count = 0;

      char        portalname[MAX_PORTALNAME_LEN];

      /* Select a nonconflicting name */
      for (;;)
      {
            unnamed_portal_count++;
            sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
            if (GetPortalByName(portalname) == NULL)
                  break;
      }

      return CreatePortal(portalname, false, false);
}

/*
 * PortalDefineQuery
 *          A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.      Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts,
 * and the caller must have done RevalidateCachedPlan(), causing a refcount
 * increment.  The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's heap context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
                          const char *prepStmtName,
                          const char *sourceText,
                          const char *commandTag,
                          List *stmts,
                          CachedPlan *cplan)
{
      AssertArg(PortalIsValid(portal));
      AssertState(portal->status == PORTAL_NEW);

      AssertArg(sourceText != NULL);
      AssertArg(commandTag != NULL || stmts == NIL);

      portal->prepStmtName = prepStmtName;
      portal->sourceText = sourceText;
      portal->commandTag = commandTag;
      portal->stmts = stmts;
      portal->cplan = cplan;
      portal->status = PORTAL_DEFINED;
}
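
/*
 * Usage sketch (illustrative; queryString and stmts are assumed to be
 * supplied by the caller), following the rules documented above:
 *
 *      portal = CreatePortal("example_cursor", false, false);
 *      PortalDefineQuery(portal,
 *                        NULL,            -- no prepared-statement name
 *                        queryString,     -- must not be NULL
 *                        "SELECT",        -- constant tag, not copied
 *                        stmts,           -- plan list owned by the caller
 *                        NULL);           -- no cached-plan reference
 *
 * With cplan == NULL, the caller keeps responsibility for the lifetime of
 * stmts, typically by copying them into PortalGetHeapMemory(portal) first.
 */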

/*
 * PortalReleaseCachedPlan
 *          Release a portal's reference to its cached plan, if any.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
      if (portal->cplan)
      {
            ReleaseCachedPlan(portal->cplan, false);
            portal->cplan = NULL;

            /*
             * We must also clear portal->stmts which is now a dangling
             * reference to the cached plan's plan list.  This protects any
             * code that might try to examine the Portal later.
             */
            portal->stmts = NIL;
      }
}

/*
 * PortalCreateHoldStore
 *          Create the tuplestore for a portal.
 */
void
PortalCreateHoldStore(Portal portal)
{
      MemoryContext oldcxt;

      Assert(portal->holdContext == NULL);
      Assert(portal->holdStore == NULL);

      /*
       * Create the memory context that is used for storage of the tuple set.
       * Note this is NOT a child of the portal's heap memory.
       */
      portal->holdContext =
            AllocSetContextCreate(PortalMemory,
                                            "PortalHoldContext",
                                            ALLOCSET_DEFAULT_MINSIZE,
                                            ALLOCSET_DEFAULT_INITSIZE,
                                            ALLOCSET_DEFAULT_MAXSIZE);

      /*
       * Create the tuple store, selecting cross-transaction temp files, and
       * enabling random access only if cursor requires scrolling.
       *
       * XXX: Should maintenance_work_mem be used for the portal size?
       */
      oldcxt = MemoryContextSwitchTo(portal->holdContext);

      portal->holdStore =
            tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
                                            true, work_mem);

      MemoryContextSwitchTo(oldcxt);
}
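
/*
 * Note (illustrative): for a WITH HOLD cursor this store is created and
 * then filled at commit time by the pairing used in CommitHoldablePortals
 * below:
 *
 *      PortalCreateHoldStore(portal);
 *      PersistHoldablePortal(portal);
 */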

/*
 * PortalDrop
 *          Destroy the portal.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
      AssertArg(PortalIsValid(portal));

      /* Not sure if this case can validly happen or not... */
      if (portal->status == PORTAL_ACTIVE)
            elog(ERROR, "cannot drop active portal");

      /*
       * Remove portal from hash table.  Because we do this first, we will not
       * come back to try to remove the portal again if there's any error in the
       * subsequent steps.  Better to leak a little memory than to get into an
       * infinite error-recovery loop.
       */
      PortalHashTableDelete(portal);

      /* let portalcmds.c clean up the state it knows about */
      if (PointerIsValid(portal->cleanup))
            (*portal->cleanup) (portal);

      /* drop cached plan reference, if any */
      PortalReleaseCachedPlan(portal);

      /*
       * Release any resources still attached to the portal.      There are several
       * cases being covered here:
       *
       * Top transaction commit (indicated by isTopCommit): normally we should
       * do nothing here and let the regular end-of-transaction resource
       * releasing mechanism handle these resources too.    However, if we have a
       * FAILED portal (eg, a cursor that got an error), we'd better clean up
       * its resources to avoid resource-leakage warning messages.
       *
       * Sub transaction commit: never comes here at all, since we don't kill
       * any portals in AtSubCommit_Portals().
       *
       * Main or sub transaction abort: we will do nothing here because
       * portal->resowner was already set NULL; the resources were already
       * cleaned up in transaction abort.
       *
       * Ordinary portal drop: must release resources.  However, if the portal
       * is not FAILED then we do not release its locks.    The locks become the
       * responsibility of the transaction's ResourceOwner (since it is the
       * parent of the portal's owner) and will be released when the transaction
       * eventually ends.
       */
      if (portal->resowner &&
            (!isTopCommit || portal->status == PORTAL_FAILED))
      {
            bool        isCommit = (portal->status != PORTAL_FAILED);

            ResourceOwnerRelease(portal->resowner,
                                           RESOURCE_RELEASE_BEFORE_LOCKS,
                                           isCommit, false);
            ResourceOwnerRelease(portal->resowner,
                                           RESOURCE_RELEASE_LOCKS,
                                           isCommit, false);
            ResourceOwnerRelease(portal->resowner,
                                           RESOURCE_RELEASE_AFTER_LOCKS,
                                           isCommit, false);
            ResourceOwnerDelete(portal->resowner);
      }
      portal->resowner = NULL;

      /*
       * Delete tuplestore if present.  We should do this even under error
       * conditions; since the tuplestore would have been using cross-
       * transaction storage, its temp files need to be explicitly deleted.
       */
      if (portal->holdStore)
      {
            MemoryContext oldcontext;

            oldcontext = MemoryContextSwitchTo(portal->holdContext);
            tuplestore_end(portal->holdStore);
            MemoryContextSwitchTo(oldcontext);
            portal->holdStore = NULL;
      }

      /* delete tuplestore storage, if any */
      if (portal->holdContext)
            MemoryContextDelete(portal->holdContext);

      /* release subsidiary storage */
      MemoryContextDelete(PortalGetHeapMemory(portal));

      /* release portal struct (it's in PortalMemory) */
      pfree(portal);
}

/*
 * Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
void
PortalHashTableDeleteAll(void)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      if (PortalHashTable == NULL)
            return;

      hash_seq_init(&status, PortalHashTable);
      while ((hentry = hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            if (portal->status != PORTAL_ACTIVE)
                  PortalDrop(portal, false);
      }
}


/*
 * Pre-commit processing for portals.
 *
 * Any holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Other portals are not touched yet.
 *
 * Returns TRUE if any holdable cursors were processed, FALSE if not.
 */
bool
CommitHoldablePortals(void)
{
      bool        result = false;
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            /* Is it a holdable portal created in the current xact? */
            if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
                  portal->createSubid != InvalidSubTransactionId &&
                  portal->status == PORTAL_READY)
            {
                  /*
                   * We are exiting the transaction that created a holdable cursor.
                   * Instead of dropping the portal, prepare it for access by later
                   * transactions.
                   *
                   * Note that PersistHoldablePortal() must release all resources
                   * used by the portal that are local to the creating transaction.
                   */
                  PortalCreateHoldStore(portal);
                  PersistHoldablePortal(portal);

                  /* drop cached plan reference, if any */
                  PortalReleaseCachedPlan(portal);

                  /*
                   * Any resources belonging to the portal will be released in the
                   * upcoming transaction-wide cleanup; the portal will no longer
                   * have its own resources.
                   */
                  portal->resowner = NULL;

                  /*
                   * Having successfully exported the holdable cursor, mark it as
                   * not belonging to this transaction.
                   */
                  portal->createSubid = InvalidSubTransactionId;

                  result = true;
            }
      }

      return result;
}
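
/*
 * Caller sketch (illustrative): xact.c invokes this during pre-commit
 * processing, looping because persisting a holdable cursor can itself queue
 * more work, roughly as in
 *
 *      for (;;)
 *      {
 *            AfterTriggerFireDeferred();
 *            if (!CommitHoldablePortals())
 *                  break;
 *      }
 */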

/*
 * Pre-prepare processing for portals.
 *
 * Currently we refuse PREPARE if the transaction created any holdable
 * cursors, since it's quite unclear what to do with one.  However, this
 * has the same API as CommitHoldablePortals and is invoked in the same
 * way by xact.c, so that we can easily do something reasonable if anyone
 * comes up with something reasonable to do.
 *
 * Returns TRUE if any holdable cursors were processed, FALSE if not.
 */
bool
PrepareHoldablePortals(void)
{
      bool        result = false;
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            /* Is it a holdable portal created in the current xact? */
            if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
                  portal->createSubid != InvalidSubTransactionId &&
                  portal->status == PORTAL_READY)
            {
                  /*
                   * We are exiting the transaction that created a holdable cursor.
                   * Can't do PREPARE.
                   */
                  ereport(ERROR,
                              (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                               errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
            }
      }

      return result;
}

/*
 * Pre-commit processing for portals.
 *
 * Remove all non-holdable portals created in this transaction.
 * Portals remaining from prior transactions should be left untouched.
 */
void
AtCommit_Portals(void)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            /*
             * Do not touch active portals --- this can only happen in the case of
             * a multi-transaction utility command, such as VACUUM.
             *
             * Note however that any resource owner attached to such a portal is
             * still going to go away, so don't leave a dangling pointer.
             */
            if (portal->status == PORTAL_ACTIVE)
            {
                  portal->resowner = NULL;
                  continue;
            }

            /*
             * Do nothing to cursors held over from a previous transaction
             * (including holdable ones just frozen by CommitHoldablePortals).
             */
            if (portal->createSubid == InvalidSubTransactionId)
                  continue;

            /* Zap all non-holdable portals */
            PortalDrop(portal, true);

            /* Restart the iteration in case that led to other drops */
            /* XXX is this really necessary? */
            hash_seq_term(&status);
            hash_seq_init(&status, PortalHashTable);
      }
}

/*
 * Abort processing for portals.
 *
 * At this point we reset "active" status and run the cleanup hook if
 * present, but we can't release the portal's memory until the cleanup call.
 *
 * The reason we need to reset active is so that we can replace the unnamed
 * portal, else we'll fail to execute ROLLBACK when it arrives.
 */
void
AtAbort_Portals(void)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            /* Any portal that was actually running has to be considered broken */
            if (portal->status == PORTAL_ACTIVE)
                  portal->status = PORTAL_FAILED;

            /*
             * Do nothing else to cursors held over from a previous transaction.
             */
            if (portal->createSubid == InvalidSubTransactionId)
                  continue;

            /*
             * If it was created in the current transaction, we can't do normal
             * shutdown on a READY portal either; it might refer to objects
             * created in the failed transaction.  See comments in
             * AtSubAbort_Portals.
             */
            if (portal->status == PORTAL_READY)
                  portal->status = PORTAL_FAILED;

            /* let portalcmds.c clean up the state it knows about */
            if (PointerIsValid(portal->cleanup))
            {
                  (*portal->cleanup) (portal);
                  portal->cleanup = NULL;
            }

            /* drop cached plan reference, if any */
            PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; they will be gone before we run
             * PortalDrop.
             */
            portal->resowner = NULL;

            /*
             * Although we can't delete the portal data structure proper, we can
             * release any memory in subsidiary contexts, such as executor state.
             * The cleanup hook was the last thing that might have needed data
             * there.
             */
            MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
      }
}

/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            /* Do nothing to cursors held over from a previous transaction */
            if (portal->createSubid == InvalidSubTransactionId)
            {
                  Assert(portal->status != PORTAL_ACTIVE);
                  Assert(portal->resowner == NULL);
                  continue;
            }

            /* Else zap it. */
            PortalDrop(portal, false);
      }
}

/*
 * Pre-subcommit processing for portals.
 *
 * Reassign the portals created in the current subtransaction to the parent
 * subtransaction.
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
                              SubTransactionId parentSubid,
                              ResourceOwner parentXactOwner)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            if (portal->createSubid == mySubid)
            {
                  portal->createSubid = parentSubid;
                  if (portal->resowner)
                        ResourceOwnerNewParent(portal->resowner, parentXactOwner);
            }
      }
}

/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
                           SubTransactionId parentSubid,
                           ResourceOwner parentXactOwner)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            if (portal->createSubid != mySubid)
                  continue;

            /*
             * Force any live portals of my own subtransaction into FAILED state.
             * We have to do this because they might refer to objects created or
             * changed in the failed subtransaction, leading to crashes if
             * execution is resumed, or even if we just try to run ExecutorEnd.
             * (Note we do NOT do this to upper-level portals, since they cannot
             * have such references and hence may be able to continue.)
             */
            if (portal->status == PORTAL_READY ||
                  portal->status == PORTAL_ACTIVE)
                  portal->status = PORTAL_FAILED;

            /* let portalcmds.c clean up the state it knows about */
            if (PointerIsValid(portal->cleanup))
            {
                  (*portal->cleanup) (portal);
                  portal->cleanup = NULL;
            }

            /* drop cached plan reference, if any */
            PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; they will be gone before we
             * run PortalDrop.
             */
            portal->resowner = NULL;

            /*
             * Although we can't delete the portal data structure proper, we
             * can release any memory in subsidiary contexts, such as executor
             * state.  The cleanup hook was the last thing that might have
             * needed data there.
             */
            MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
      }
}

/*
 * Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
void
AtSubCleanup_Portals(SubTransactionId mySubid)
{
      HASH_SEQ_STATUS status;
      PortalHashEnt *hentry;

      hash_seq_init(&status, PortalHashTable);

      while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
      {
            Portal            portal = hentry->portal;

            if (portal->createSubid != mySubid)
                  continue;

            /* Zap it. */
            PortalDrop(portal, false);
      }
}

/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
      ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
      TupleDesc   tupdesc;
      Tuplestorestate *tupstore;
      MemoryContext per_query_ctx;
      MemoryContext oldcontext;
      HASH_SEQ_STATUS hash_seq;
      PortalHashEnt *hentry;

      /* check to see if caller supports us returning a tuplestore */
      if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
            ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("set-valued function called in context that cannot accept a set")));
      if (!(rsinfo->allowedModes & SFRM_Materialize))
            ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("materialize mode required, but it is not " \
                                    "allowed in this context")));

      /* need to build tuplestore in query context */
      per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
      oldcontext = MemoryContextSwitchTo(per_query_ctx);

      /*
       * build tupdesc for result tuples. This must match the definition of the
       * pg_cursors view in system_views.sql
       */
      tupdesc = CreateTemplateTupleDesc(6, false);
      TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
                                 TEXTOID, -1, 0);
      TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
                                 TEXTOID, -1, 0);
      TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
                                 BOOLOID, -1, 0);
      TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
                                 BOOLOID, -1, 0);
      TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
                                 BOOLOID, -1, 0);
      TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
                                 TIMESTAMPTZOID, -1, 0);

      /*
       * We put all the tuples into a tuplestore in one scan of the hashtable.
       * This avoids any issue of the hashtable possibly changing between calls.
       */
      tupstore =
            tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
                                            false, work_mem);

      /* generate junk in short-term context */
      MemoryContextSwitchTo(oldcontext);

      hash_seq_init(&hash_seq, PortalHashTable);
      while ((hentry = hash_seq_search(&hash_seq)) != NULL)
      {
            Portal            portal = hentry->portal;
            Datum       values[6];
            bool        nulls[6];

            /* report only "visible" entries */
            if (!portal->visible)
                  continue;

            MemSet(nulls, 0, sizeof(nulls));

            values[0] = CStringGetTextDatum(portal->name);
            values[1] = CStringGetTextDatum(portal->sourceText);
            values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
            values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
            values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
            values[5] = TimestampTzGetDatum(portal->creation_time);

            tuplestore_putvalues(tupstore, tupdesc, values, nulls);
      }

      /* clean up and return the tuplestore */
      tuplestore_donestoring(tupstore);

      rsinfo->returnMode = SFRM_Materialize;
      rsinfo->setResult = tupstore;
      rsinfo->setDesc = tupdesc;

      return (Datum) 0;
}
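
/*
 * Illustrative query: this function backs the pg_cursors system view
 * defined in system_views.sql, so its output is normally consumed via SQL
 * such as
 *
 *      SELECT name, statement, is_holdable FROM pg_cursors;
 */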
