Asterisk Source/Select Poll


* select
  • select(0,NULL,NULL,NULL,NULL)
If the readfds, writefds, and errorfds arguments are all null pointers and the timeout argument is also a null pointer, select() blocks until interrupted by a signal.
The call returns the number of `ready' FDs found, and the three fd sets are
modified in place, with only the ready FDs left in the sets. Use the
FD_ISSET macro to test the returned sets; a minimal example follows.
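
A minimal, self-contained example of the more common usage described above (not taken from Asterisk); the 5-second timeout and the choice of stdin are illustrative assumptions:

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>
#include <unistd.h>

/* Sketch: wait up to 5 seconds for stdin to become readable. */
int main(void)
{
        fd_set rfds;
        struct timeval tv;
        int res;

        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);            /* watch fd 0 for reading */
        tv.tv_sec = 5;                          /* 5 second timeout */
        tv.tv_usec = 0;

        res = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
        if (res < 0)
                perror("select");
        else if (res == 0)
                printf("timed out, no data\n");
        else if (FD_ISSET(STDIN_FILENO, &rfds)) /* test the returned set */
                printf("stdin is readable\n");
        return 0;
}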

* poll
  • <sys/poll.h>
  • int poll(struct pollfd *ufds, unsigned int nfds, int timeout);
  
    #define POLLIN      0x0001  // There is data to read.
    #define POLLPRI     0x0002  // There is urgent data to read.
    #define POLLOUT     0x0004  // Writing will not block.
    #define POLLERR     0x0008  // An error occurred.
    #define POLLHUP     0x0010  // The connection was hung up.
    #define POLLNVAL    0x0020  // Invalid request, e.g. the file
                                // descriptor is not open.
  • The last argument, timeout, plays the same role as the timeout argument of select.
If none of the defined events have occurred on any selected file descriptor, poll() shall wait at least timeout milliseconds for an event to occur on any of the selected file descriptors. If the value of timeout is 0, poll() shall return immediately. If the value of timeout is -1, poll() shall block until a requested event occurs or until the call is interrupted.
RETURN value
   Upon successful completion, poll() shall return a non-negative value. A positive value indicates the total number of file descriptors that have been selected (that is, file descriptors for which the revents member is non-zero). A value of 0 indicates that the call timed out and no file descriptors have been selected. Upon failure, poll() shall return -1 and set errno to indicate the error.
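
For comparison, a minimal poll() version of the same wait (not from Asterisk; the 5000 ms timeout and stdin are again illustrative choices):

#include <stdio.h>
#include <sys/poll.h>
#include <unistd.h>

/* Sketch: poll stdin for readability with a 5000 ms timeout. */
int main(void)
{
        struct pollfd ufds[1];
        int res;

        ufds[0].fd = STDIN_FILENO;
        ufds[0].events = POLLIN;        /* interested in "data to read" */

        res = poll(ufds, 1, 5000);      /* timeout in milliseconds */
        if (res < 0)
                perror("poll");
        else if (res == 0)
                printf("timed out, no data\n");
        else if (ufds[0].revents & POLLIN)
                printf("stdin is readable\n");
        return 0;
}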

sched.c

* chan_sip.c
  • static void *do_monitor(void *data)
    • sched = sched_context_create();
    • res = ast_sched_wait(sched);
    • res = ast_io_wait(io, res);
    • if (res >= 0), i.e. ast_io_wait did not report an error (the loop shape is sketched after this list)
      • ast_sched_runq(sched);
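
Putting the calls above together, the monitor thread runs roughly the following loop. This is a simplified sketch built only from the calls listed above, not the verbatim chan_sip.c source; the real do_monitor() does additional locking and housekeeping around this core.

/* sched and io are the module-level contexts created in chan_sip.c. */
static void *monitor_loop_sketch(void *data)
{
        int res;
        for (;;) {
                res = ast_sched_wait(sched);    /* ms until the next scheduled event, -1 if none */
                res = ast_io_wait(io, res);     /* block on SIP socket I/O or until that timeout */
                if (res >= 0)                   /* ast_io_wait did not report an error */
                        ast_sched_runq(sched);  /* run every event whose time has come */
        }
        return NULL;
}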
* struct sched_context *sched_context_create(void)
struct sched_context *sched_context_create(void)
{
        struct sched_context *tmp;
        tmp = malloc(sizeof(struct sched_context));
        if (tmp) {
                tmp->eventcnt = 1;      /* number of events processed */
                tmp->schedcnt = 0;      /* number of outstanding schedule events */
                tmp->schedq = NULL;     /* schedule entry and main queue */
#ifdef SCHED_MAX_CACHE
                tmp->schedc = NULL;
                tmp->schedccnt = 0;
#endif
        }
        return tmp;
}
  • eventcnt
    • set to 1 in sched_context_create
    • incremented by 1 on every ast_sched_add
    • used as the event id (i.e. the order in which the event entered the queue)
    • ast_sched_del removes an event by this id (see the usage sketch after this list)
  • schedcnt
    • the number of events currently in the queue
    • set to 0 in sched_context_create
    • incremented by 1 in schedule()
    • decremented by 1 in ast_sched_del
    • decremented in ast_sched_runq by the number of events it runs
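
A small usage sketch of the id lifecycle; my_retry(), the 5000 ms interval, and the example() wrapper are invented for illustration, while ast_sched_add() and ast_sched_del() are the sched.c calls named above:

/* Illustrative only: my_retry() and the 5000 ms value are not from the Asterisk source. */
static int my_retry(void *data)
{
        /* periodic work on 'data' would go here */
        return 1;                               /* non-zero asks ast_sched_runq to re-schedule it */
}

static void example(struct sched_context *sched, void *p)
{
        int id;
        id = ast_sched_add(sched, 5000, my_retry, p);   /* run my_retry(p) in 5000 ms */
        if (id > -1)
                ast_sched_del(sched, id);               /* or cancel it by id before it fires */
}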
* int ast_sched_wait(struct sched_context *con)
int ast_sched_wait(struct sched_context *con)
{
        /*
         * Return the number of milliseconds
         * until the next scheduled event
         */
        struct timeval tv;
        int ms;
        DEBUG(ast_log(LOG_DEBUG, "ast_sched_wait()\n"));
        if (!con->schedq) 
                return -1;
        if (gettimeofday(&tv, NULL) < 0) {
                /* This should never happen */
                return 0;
        };
        ms = (con->schedq->when.tv_sec - tv.tv_sec) * 1000;
        ms += (con->schedq->when.tv_usec - tv.tv_usec) / 1000;
        if (ms < 0)
                ms = 0;
        return ms;
}
    • return -1 if the schedule queue is empty
    • assign the current time to tv
    • return 0 if the when of the first event in the queue has already passed
    • otherwise return the number of milliseconds remaining
* int ast_sched_runq(struct sched_context *con)
  • x=0;
  • for
    • break if there is no event in the schedule queue
    • assign the current time to tv
    • if the first event's when is at or before the current time:
      • current = con->schedq;
      • con->schedq = con->schedq->next;
      • con->schedcnt--;
      • current->callback(current->data)
        • then sched_release(con, current);
        • or schedule(con, current) again, if the callback returned non-zero
      • x++;
    • otherwise break
  • return x; (a sketch of the whole function follows this list)
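
A sketch of ast_sched_runq() reconstructed from the steps above; it is not the verbatim sched.c source (logging and some edge-case handling are omitted), but it shows the control flow:

/* Reconstructed sketch, not the verbatim sched.c code. */
int ast_sched_runq(struct sched_context *con)
{
        struct sched *current;
        struct timeval tv;
        int x = 0;

        for (;;) {
                if (!con->schedq)                       /* nothing scheduled */
                        break;
                if (gettimeofday(&tv, NULL) < 0)        /* current time */
                        break;
                if (SOONER(con->schedq->when, tv)) {    /* head event is due */
                        current = con->schedq;          /* pop it off the queue */
                        con->schedq = con->schedq->next;
                        con->schedcnt--;
                        if (current->callback(current->data)) {
                                /* non-zero: re-arm it 'resched' ms from now */
                                sched_settime(&current->when, current->resched);
                                schedule(con, current);
                        } else {
                                /* finished: return the entry to the cache or free it */
                                sched_release(con, current);
                        }
                        x++;
                } else
                        break;                          /* head event is not yet due */
        }
        return x;                                       /* number of events run */
}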
* chan_sip.c
  • ast_io_add(io, sipsock, sipsock_read, AST_IO_IN, NULL);
  • res = ast_io_wait(io, res);
  • static int sipsock_read(int *id, int fd, short events, void *ignore)
  • handle_request(p, &req, &sin);
  • register_verify(p, sin, req)
  • parse_contact(p, peer, req)
  • p->expire = ast_sched_add(sched, expirey * 1000, expire_register, p);
* int ast_sched_add(struct sched_context *con, int when, ast_sched_cb callback, void *data)
  • tmp->id = con->eventcnt++;
  • tmp->callback = callback;
  • tmp->data = data;
  • tmp->resched = when;
  • sched_settime(&tmp->when, when)
    • add when (in ms) to the current time and store the result in tmp->when
  • schedule(con, tmp);
    • schedq is a list of events kept in time order.
    • the new event's time is compared with the times of the events already in the list, and it is inserted at the matching position.
  • return tmp->id; (a sketch of the whole function follows this list)
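
Reconstructed from the bullets above, a sketch of ast_sched_add() with error paths trimmed (not the verbatim sched.c source):

/* Reconstructed sketch; error handling trimmed. */
int ast_sched_add(struct sched_context *con, int when, ast_sched_cb callback, void *data)
{
        struct sched *tmp;

        tmp = sched_alloc(con);                 /* reuse a cached entry or malloc a new one */
        if (!tmp)
                return -1;
        tmp->id = con->eventcnt++;              /* id = arrival order */
        tmp->callback = callback;
        tmp->data = data;
        tmp->resched = when;                    /* kept so the event can be re-armed later */
        if (sched_settime(&tmp->when, when)) {  /* when = now + 'when' ms */
                sched_release(con, tmp);
                return -1;
        }
        schedule(con, tmp);                     /* insert into schedq in time order */
        return tmp->id;
}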
* static struct sched *sched_alloc(struct sched_context *con)
static struct sched *sched_alloc(struct sched_context *con)
{
        /*
         * We keep a small cache of schedule entries
         * to minimize the number of necessary malloc()'s
         */
        struct sched *tmp;
#ifdef SCHED_MAX_CACHE
        if (con->schedc) {
                tmp = con->schedc;
                con->schedc = con->schedc->next;
                con->schedccnt--;
        } else
#endif
                tmp = malloc(sizeof(struct sched));
        return tmp;
}
* static inline int sched_settime(struct timeval *tv, int when)
static inline int sched_settime(struct timeval *tv, int when)
{
        if (gettimeofday(tv, NULL) < 0) {
                        /* This shouldn't ever happen, but let's be sure */
                        ast_log(LOG_NOTICE, "gettimeofday() failed!\n");
                        return -1;
        }
        tv->tv_sec += when/1000;
        tv->tv_usec += (when % 1000) * 1000;
        if (tv->tv_usec > 1000000) {
                tv->tv_sec++;
                tv->tv_usec-= 1000000;
        }
        return 0;
}
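    • Worked example: when = 2500 gives tv_sec += 2 and tv_usec += 500000; if the addition pushes tv_usec above 1000000, one second is carried over into tv_sec.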
* static void schedule(struct sched_context *con, struct sched *s)
static void schedule(struct sched_context *con, struct sched *s)
{
        /*
         * Take a sched structure and put it in the
         * queue, such that the soonest event is
         * first in the list.
         */

        struct sched *last=NULL;
        struct sched *current=con->schedq;
        while(current) {
                if (SOONER(s->when, current->when))
                        break;
                last = current;
                current = current->next;
        }
        /* Insert this event into the schedule */
        s->next = current;
        if (last)
                last->next = s;
        else
                con->schedq = s;
        con->schedcnt++;
}
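    • SOONER(a, b) is a sched.c helper macro that is true when timeval a comes before timeval b, so the loop stops at the first queued entry that is later than the new event and inserts in front of it.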
* static void sched_release(struct sched_context *con, struct sched *tmp)
static void sched_release(struct sched_context *con, struct sched *tmp)
{
        /*
         * Add to the cache, or just free() if we
         * already have too many cache entries
         */

#ifdef SCHED_MAX_CACHE
        if (con->schedccnt < SCHED_MAX_CACHE) {
                tmp->next = con->schedc;
                con->schedc = tmp;
                con->schedccnt++;
        } else
#endif
                free(tmp);
}
