improve comments and add more explanation about subtleties
svn path=/trunk/netsurf/; revision=11686
This commit is contained in:
parent
e4035987a7
commit
3407760e64
|
@ -50,7 +50,6 @@ struct nscallback
|
|||
* The callback function will be called as soon as possible after t cs have
|
||||
* passed.
|
||||
*/
|
||||
|
||||
void schedule(int cs_ival, void (*callback)(void *p), void *p)
|
||||
{
|
||||
struct nscallback *nscb;
|
||||
|
@ -61,7 +60,7 @@ void schedule(int cs_ival, void (*callback)(void *p), void *p)
|
|||
|
||||
nscb = calloc(1, sizeof(struct nscallback));
|
||||
|
||||
LOG(("adding callback %p for %p(%p) at %d cs", nscb, callback, p, cs_ival));
|
||||
LOG(("adding callback %p for %p(%p) at %d cs", nscb, callback, p, cs_ival));
|
||||
|
||||
gettimeofday(&nscb->tv, NULL);
|
||||
timeradd(&nscb->tv, &tv, &nscb->tv);
|
||||
|
@ -123,12 +122,8 @@ void schedule_remove(void (*callback)(void *p), void *p)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Process scheduled callbacks up to current time.
|
||||
*
|
||||
* @return The number of milliseconds until the next scheduled event
|
||||
* or -1 for no event.
|
||||
*/
|
||||
|
||||
/* exported interface documented in schedule.h */
|
||||
int
|
||||
schedule_run(void)
|
||||
{
|
||||
|
@ -169,11 +164,13 @@ schedule_run(void)
|
|||
|
||||
free(unlnk_nscb);
|
||||
|
||||
/* need to deal with callback modifying the list. */
|
||||
/* dispatched events can modify the list,
|
||||
* instead of locking we simply reset list
|
||||
* enumeration to the start.
|
||||
*/
|
||||
if (schedule_list == NULL)
|
||||
return -1; /* no more callbacks scheduled */
|
||||
|
||||
/* reset enumeration to the start of the list */
|
||||
cur_nscb = schedule_list;
|
||||
prev_nscb = NULL;
|
||||
nexttime = cur_nscb->tv;
|
||||
|
@ -190,16 +187,17 @@ schedule_run(void)
|
|||
}
|
||||
}
|
||||
|
||||
/* make rettime relative to now */
|
||||
/* make returned time relative to now */
|
||||
timersub(&nexttime, &tv, &rettime);
|
||||
|
||||
#if DEBUG_SCHEDULER
|
||||
#if defined(DEBUG_SCHEDULER)
|
||||
LOG(("returning time to next event as %ldms",(rettime.tv_sec * 1000) + (rettime.tv_usec / 1000)));
|
||||
#endif
|
||||
/* return next event time in milliseconds (24 days max wait) */
|
||||
return (rettime.tv_sec * 1000) + (rettime.tv_usec / 1000);
|
||||
}
|
||||
|
||||
/* exported interface documented in schedule.h */
|
||||
void list_schedule(void)
|
||||
{
|
||||
struct timeval tv;
|
||||
|
|
|
@ -19,7 +19,35 @@
|
|||
#ifndef FRAMEBUFFER_SCHEDULE_H
|
||||
#define FRAMEBUFFER_SCHEDULE_H
|
||||
|
||||
/**
|
||||
* Process scheduled callbacks up to current time.
|
||||
*
|
||||
* This walks the list of outstanding scheduled events and dispatches
|
||||
* them if they have met their scheduled time. Due to legacy issues
|
||||
* there are a couple of subtleties with how this operates:
|
||||
*
|
||||
* - Generally there are so few entries on the list the overhead of
|
||||
* ordering the list exceeds the cost of simply enumerating them.
|
||||
*
|
||||
* - The scheduled time is the time *after* which we should call the
|
||||
* operation back, this can result in the next scheduled time
|
||||
* being zero. This is exceedingly rare as the core schedules in
|
||||
* 10ms (cs) quanta and we almost always get called to schedule
|
||||
* after the event time.
|
||||
*
|
||||
* - The callbacks can cause the schedule list to be re-arranged, added
|
||||
* to, or even completely deleted. This means we must reset the
|
||||
* list enumeration to the beginning every time an event is
|
||||
* dispatched.
|
||||
*
|
||||
* @return The number of milliseconds until the next scheduled event
|
||||
* or -1 for no event.
|
||||
*/
|
||||
int schedule_run(void);
|
||||
|
||||
/**
|
||||
* LOG all current scheduled events.
|
||||
*/
|
||||
void list_schedule(void);
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in New Issue