// Copyright (C) 2006-2010 David Sugar, Tycho Softworks.
2//
3// This file is part of GNU uCommon C++.
4//
5// GNU uCommon C++ is free software: you can redistribute it and/or modify
6// it under the terms of the GNU Lesser General Public License as published
7// by the Free Software Foundation, either version 3 of the License, or
8// (at your option) any later version.
9//
10// GNU uCommon C++ is distributed in the hope that it will be useful,
11// but WITHOUT ANY WARRANTY; without even the implied warranty of
12// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13// GNU Lesser General Public License for more details.
14//
15// You should have received a copy of the GNU Lesser General Public License
16// along with GNU uCommon C++. If not, see <http://www.gnu.org/licenses/>.
17
/**
 * Thread classes and synchronization objects.
 * The theory behind ucommon thread classes is that they would be used
 * to create derived classes where thread-specific data can be stored as
 * member data of the derived class. The run method is called when the
 * thread context is executed. Since we use a pthread foundation, we support
 * both detached threads and joinable threads. Objects based on detached
 * threads should be created with new, and will automatically delete when
 * the thread context exits. Joinable threads are joined when deleted.
 *
 * The theory behind ucommon synchronization objects is that all upper level
 * synchronization objects can be formed directly from a mutex and conditional.
 * This includes semaphores, barriers, rwlocks, our own specialized conditional
 * lock, resource-bound locking, and recursive exclusive locks. Using only
 * conditionals means we are not dependent on platform specific pthread
 * implementations that may not implement some of these, and hence improves
 * portability and consistency. Given that our rwlocks are recursive access
 * locks, one can safely create read/write threading pairs where the read
 * threads need not worry about deadlocks, and neither do the writers if
 * they only write-lock one instance at a time to change state.
 * @file ucommon/thread.h
 */
40
41/**
42 * An example of the thread queue class. This may be relevant to producer-
43 * consumer scenarios and realtime applications where queued messages are
44 * stored on a re-usable object pool.
45 * @example queue.cpp
46 */
47
48/**
49 * A simple example of threading and join operation.
50 * @example thread.cpp
51 */
52
53#ifndef _UCOMMON_THREAD_H_
54#define _UCOMMON_THREAD_H_
55
56#ifndef _UCOMMON_CPR_H_
57#include <ucommon/cpr.h>
58#endif
59
60#ifndef _UCOMMON_ACCESS_H_
61#include <ucommon/access.h>
62#endif
63
64#ifndef _UCOMMON_TIMERS_H_
65#include <ucommon/timers.h>
66#endif
67
68#ifndef _UCOMMON_MEMORY_H_
69#include <ucommon/memory.h>
70#endif
71
72NAMESPACE_UCOMMON
73
74class SharedPointer;
75
/**
 * The conditional is a common base for other thread synchronizing classes.
 * Many of the complex synchronization objects, including barriers, semaphores,
 * and various forms of read/write locks, are all built from the conditional.
 * This assures that the minimum functionality to build higher order thread
 * synchronizing objects is a pure conditional, and removes dependencies on
 * what may be optional features or functions that may have different
 * behaviors on different pthread implementations and platforms.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
86class __EXPORT Conditional
87{
88private:
89 friend class ConditionalAccess;
90
91#if defined(_MSCONDITIONAL_)
92 CRITICAL_SECTION mutex;
93 CONDITION_VARIABLE cond;
94#elif defined(_MSWINDOWS_)
95 enum {SIGNAL = 0, BROADCAST = 1};
96 HANDLE events[2];
97 unsigned waiting;
98 CRITICAL_SECTION mlock;
99 CRITICAL_SECTION mutex;
100#else
101#ifndef __PTH__
102 class __LOCAL attribute
103 {
104 public:
105 pthread_condattr_t attr;
106 attribute();
107 };
108
109 __LOCAL static attribute attr;
110#endif
111
112 pthread_cond_t cond;
113 pthread_mutex_t mutex;
114#endif
115
116protected:
117 friend class TimedEvent;
118
119 /**
120 * Conditional wait for signal on millisecond timeout.
121 * @param timeout in milliseconds.
122 * @return true if signalled, false if timer expired.
123 */
124 bool wait(timeout_t timeout);
125
126 /**
127 * Conditional wait for signal on timespec timeout.
128 * @param timeout as a high resolution timespec.
129 * @return true if signalled, false if timer expired.
130 */
131 bool wait(struct timespec *timeout);
132
133#ifdef _MSWINDOWS_
134 inline void lock(void)
135 {EnterCriticalSection(&mutex);};
136
137 inline void unlock(void)
138 {LeaveCriticalSection(&mutex);};
139
140 void wait(void);
141 void signal(void);
142 void broadcast(void);
143
144#else
145 /**
146 * Lock the conditional's supporting mutex.
147 */
148 inline void lock(void)
149 {pthread_mutex_lock(&mutex);};
150
151 /**
152 * Unlock the conditional's supporting mutex.
153 */
154 inline void unlock(void)
155 {pthread_mutex_unlock(&mutex);};
156
157 /**
158 * Wait (block) until signalled.
159 */
160 inline void wait(void)
161 {pthread_cond_wait(&cond, &mutex);};
162
163 /**
164 * Signal the conditional to release one waiting thread.
165 */
166 inline void signal(void)
167 {pthread_cond_signal(&cond);};
168
169 /**
170 * Signal the conditional to release all waiting threads.
171 */
172 inline void broadcast(void)
173 {pthread_cond_broadcast(&cond);};
174#endif
175
176 /**
177 * Initialize and construct conditional.
178 */
179 Conditional();
180
181 /**
182 * Destroy conditional, release any blocked threads.
183 */
184 ~Conditional();
185
186public:
187#if !defined(_MSWINDOWS_) && !defined(__PTH__)
188 /**
189 * Support function for getting conditional attributes for realtime
190 * scheduling.
191 * @return attributes to use for creating realtime conditionals.
192 */
193 static inline pthread_condattr_t *initializer(void)
194 {return &attr.attr;};
195#endif
196
197 /**
198 * Convert a millisecond timeout into use for high resolution
199 * conditional timers.
200 * @param hires timespec representation to set.
201 * @param timeout to convert.
202 */
203 static void set(struct timespec *hires, timeout_t timeout);
204};
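
/**
 * Usage sketch (illustrative, not part of the library): because the
 * constructor and primitives of Conditional are protected, it is used by
 * deriving a class and pairing lock()/unlock() with wait() and signal().
 * The CompletionFlag class below is hypothetical.
 * @code
 * class CompletionFlag : private Conditional
 * {
 * private:
 *     bool done;
 *
 * public:
 *     CompletionFlag() : Conditional(), done(false) {};
 *
 *     void post(void)
 *         {lock(); done = true; signal(); unlock();};
 *
 *     void wait_done(void)
 *         {lock(); while(!done) wait(); unlock();};
 * };
 * @endcode
 */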
205
/**
 * The conditional access class separates scheduling to optimize the behavior
 * of rw locks. This variant of conditional separates scheduling of read
 * (broadcast wakeup) and write (signal wakeup) based threads. It is used to
 * form generic rwlocks as well as the specialized condlock.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
213class __EXPORT ConditionalAccess : private Conditional
214{
215protected:
216#if defined _MSCONDITIONAL_
217 CONDITION_VARIABLE bcast;
218#elif !defined(_MSWINDOWS_)
219 pthread_cond_t bcast;
220#endif
221
222 unsigned pending, waiting, sharing;
223
224 /**
225 * Conditional wait for signal on millisecond timeout.
226 * @param timeout in milliseconds.
227 * @return true if signalled, false if timer expired.
228 */
229 bool waitSignal(timeout_t timeout);
230
231 /**
232 * Conditional wait for broadcast on millisecond timeout.
233 * @param timeout in milliseconds.
234 * @return true if signalled, false if timer expired.
235 */
236 bool waitBroadcast(timeout_t timeout);
237
238
239 /**
240 * Conditional wait for signal on timespec timeout.
241 * @param timeout as a high resolution timespec.
242 * @return true if signalled, false if timer expired.
243 */
244 bool waitSignal(struct timespec *timeout);
245
246 /**
247 * Conditional wait for broadcast on timespec timeout.
248 * @param timeout as a high resolution timespec.
249 * @return true if signalled, false if timer expired.
250 */
251 bool waitBroadcast(struct timespec *timeout);
252
253 /**
254 * Convert a millisecond timeout into use for high resolution
255 * conditional timers.
256 * @param hires timespec representation to set.
257 * @param timeout to convert.
258 */
259 inline static void set(struct timespec *hires, timeout_t timeout)
260 {Conditional::set(hires, timeout);};
261
262
263#ifdef _MSWINDOWS_
264 inline void lock(void)
265 {EnterCriticalSection(&mutex);};
266
267 inline void unlock(void)
268 {LeaveCriticalSection(&mutex);};
269
270 void waitSignal(void);
271 void waitBroadcast(void);
272
273 inline void signal(void)
274 {Conditional::signal();};
275
276 inline void broadcast(void)
277 {Conditional::broadcast();};
278
279#else
280 /**
281 * Lock the conditional's supporting mutex.
282 */
283 inline void lock(void)
284 {pthread_mutex_lock(&mutex);};
285
286 /**
287 * Unlock the conditional's supporting mutex.
288 */
289 inline void unlock(void)
290 {pthread_mutex_unlock(&mutex);};
291
292 /**
293 * Wait (block) until signalled.
294 */
295 inline void waitSignal(void)
296 {pthread_cond_wait(&cond, &mutex);};
297
298 /**
299 * Wait (block) until broadcast.
300 */
301 inline void waitBroadcast(void)
302 {pthread_cond_wait(&bcast, &mutex);};
303
304
305 /**
306 * Signal the conditional to release one signalled thread.
307 */
308 inline void signal(void)
309 {pthread_cond_signal(&cond);};
310
311 /**
312 * Signal the conditional to release all broadcast threads.
313 */
314 inline void broadcast(void)
315 {pthread_cond_broadcast(&bcast);};
316#endif
317public:
318 /**
319 * Initialize and construct conditional.
320 */
321 ConditionalAccess();
322
323 /**
324 * Destroy conditional, release any blocked threads.
325 */
326 ~ConditionalAccess();
327
328 /**
329 * Access mode shared thread scheduling.
330 */
331 void access(void);
332
333 /**
334 * Exclusive mode write thread scheduling.
335 */
336 void modify(void);
337
338 /**
339 * Release access mode read scheduling.
340 */
341 void release(void);
342
343 /**
344 * Complete exclusive mode write scheduling.
345 */
346 void commit(void);
347
    /**
     * Specify a maximum sharing (access) limit. This can be used
     * to detect locking errors, such as when acquiring locks that are
     * not released.
     * @param max sharing level.
     */
354 void limit_sharing(unsigned max);
355};
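
/**
 * Usage sketch (illustrative only): readers bracket work with access() and
 * release(), writers with modify() and commit(). The guarded variable and
 * function names here are hypothetical.
 * @code
 * static ConditionalAccess guard;
 * static int shared_value = 0;
 *
 * int read_value(void)
 * {
 *     guard.access();          // shared (read) scheduling
 *     int copy = shared_value;
 *     guard.release();         // release shared access
 *     return copy;
 * }
 *
 * void write_value(int value)
 * {
 *     guard.modify();          // exclusive (write) scheduling
 *     shared_value = value;
 *     guard.commit();          // complete exclusive access
 * }
 * @endcode
 */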
356
/**
 * Event notification to manage scheduled realtime threads. The timer
 * is advanced to sleep threads, which then wake up either when the timer
 * has expired or when they are notified through the signal handler. This can
 * be used to schedule and signal one-time completion handlers or for time
 * synchronized events signaled by an asynchronous I/O or event source.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
365class __EXPORT TimedEvent : public Timer
366{
367private:
368#ifdef _MSWINDOWS_
369 HANDLE event;
370#else
371 pthread_cond_t cond;
372 bool signalled;
373#endif
374 pthread_mutex_t mutex;
375
376protected:
377 /**
378 * Lock the object for wait or to manipulate derived data. This is
379 * relevant to manipulations in a derived class.
380 */
381 void lock(void);
382
    /**
     * Release the object lock after waiting. This is relevant to
     * manipulations in a derived class.
     */
387 void release(void);
388
389 /**
390 * Wait while locked. This can be used in more complex derived
391 * objects where we are concerned with synchronized access between
392 * the signaling and event thread. This can be used in place of
393 * wait, but lock and release methods must be used around it.
394 * @return true if time expired.
395 */
396 bool sync(void);
397
398public:
399 /**
400 * Create event handler and timer for timing of events.
401 */
402 TimedEvent(void);
403
404 /**
405 * Create event handler and timer set to trigger a timeout.
406 * @param timeout in milliseconds.
407 */
408 TimedEvent(timeout_t timeout);
409
410 /**
411 * Create event handler and timer set to trigger a timeout.
412 * @param timeout in seconds.
413 */
414 TimedEvent(time_t timeout);
415
416 /**
417 * Destroy timer and release pending events.
418 */
419 ~TimedEvent();
420
421 /**
422 * Signal pending event. Object may be locked or unlocked. The
423 * signalling thread may choose to lock and check a condition in
424 * a derived class before signalling.
425 */
426 void signal(void);
427
428 /**
429 * Wait to be signalled or until timer expires. This is a wrapper for
430 * expire for simple completion events.
431 * @param timeout to wait from last reset.
432 * @return true if signaled, false if timeout.
433 */
434 bool wait(timeout_t timeout);
435
436 /**
437 * A simple wait until triggered.
438 */
439 void wait(void);
440
441 /**
442 * Reset triggered conditional.
443 */
444 void reset(void);
445};
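
/**
 * Usage sketch (illustrative only) for a one-time completion event: one
 * thread blocks in wait() with a timeout while another calls signal().
 * @code
 * static TimedEvent completion;
 *
 * void worker_done(void)
 * {
 *     completion.signal();             // wake a thread blocked in wait()
 * }
 *
 * bool wait_for_worker(void)
 * {
 *     return completion.wait(2000);    // true if signalled within 2 seconds
 * }
 * @endcode
 */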
446
/**
 * Portable recursive exclusive lock. This class is built from the
 * conditional and hence does not require support for non-standard and
 * platform specific extensions to pthread mutex to support recursive
 * style mutex locking. The exclusive protocol is implemented to support
 * exclusive_lock referencing.
 */
454class __EXPORT RecursiveMutex : private Conditional, public ExclusiveAccess
455{
456protected:
457 unsigned waiting;
458 unsigned lockers;
459 pthread_t locker;
460
461 virtual void _lock(void);
462 virtual void _unlock(void);
463
464public:
465 /**
466 * Create rexlock.
467 */
468 RecursiveMutex();
469
470 /**
471 * Acquire or increase locking.
472 */
473 void lock(void);
474
475 /**
476 * Timed lock request.
477 */
478 bool lock(timeout_t timeout);
479
480 /**
481 * Release or decrease locking.
482 */
483 void release(void);
484};
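
/**
 * Usage sketch (illustrative only): the same thread may call lock()
 * repeatedly, releasing once per acquisition.
 * @code
 * static RecursiveMutex rexlock;
 *
 * void inner(void)
 * {
 *     rexlock.lock();              // re-entry by the owning thread is safe
 *     // ...protected work...
 *     rexlock.release();
 * }
 *
 * void outer(void)
 * {
 *     rexlock.lock();
 *     inner();
 *     rexlock.release();
 * }
 * @endcode
 */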
485
/**
 * A generic and portable implementation of read/write locking. This
 * class implements classical read/write locking, including "timed" locks.
 * Support for scheduling threads to avoid writer starvation is also provided.
 * By building read/write locks from a conditional, we make them
 * available on pthread implementations and other platforms which do not
 * normally include optional pthread rwlocks. We also do not restrict
 * the number of threads that may use the lock. Finally, both the exclusive
 * and shared protocols are implemented to support exclusive_lock and
 * shared_lock referencing.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
498class __EXPORT ThreadLock : private ConditionalAccess, public ExclusiveAccess, public SharedAccess
499{
500protected:
501 unsigned writers;
502 pthread_t writeid;
503
504 virtual void _lock(void);
505 virtual void _share(void);
506 virtual void _unlock(void);
507
508public:
    /**
     * Guard class to apply scope based access locking to objects. The rwlock
     * is located from the rwlock pool rather than contained in the target
     * object, and the read lock is released when the guard object falls out of
     * scope. This is essentially an automation mechanism for ThreadLock::reader.
     * @author David Sugar <dyfet@gnutelephony.org>
     */
516 class __EXPORT guard_reader
517 {
518 private:
519 const void *object;
520
521 public:
        /**
         * Create an uninitialized instance of guard. Usually used with a
         * guard = operator.
         */
526 guard_reader();
527
528 /**
529 * Construct a guard for a specific object.
530 * @param object to guard.
531 */
532 guard_reader(const void *object);
533
534 /**
535 * Release mutex when guard falls out of scope.
536 */
537 ~guard_reader();
538
        /**
         * Set guard to read lock a new object. If a lock is currently
         * held, it is released.
         * @param object to guard.
         */
544 void set(const void *object);
545
546 /**
547 * Prematurely release a guard.
548 */
549 void release(void);
550
551 /**
552 * Set guard to read lock a new object. If a lock is currently
553 * held, it is released.
554 * @param pointer to object to guard.
555 */
556 inline void operator=(const void *pointer)
557 {set(pointer);};
558 };
559
    /**
     * Guard class to apply scope based exclusive locking to objects. The rwlock
     * is located from the rwlock pool rather than contained in the target
     * object, and the write lock is released when the guard object falls out of
     * scope. This is essentially an automation mechanism for ThreadLock::writer.
     * @author David Sugar <dyfet@gnutelephony.org>
     */
567 class __EXPORT guard_writer
568 {
569 private:
570 const void *object;
571
572 public:
        /**
         * Create an uninitialized instance of guard. Usually used with a
         * guard = operator.
         */
577 guard_writer();
578
579 /**
580 * Construct a guard for a specific object.
581 * @param object to guard.
582 */
583 guard_writer(const void *object);
584
585 /**
586 * Release mutex when guard falls out of scope.
587 */
588 ~guard_writer();
589
        /**
         * Set guard to write lock a new object. If a lock is currently
         * held, it is released.
         * @param object to guard.
         */
595 void set(const void *object);
596
597 /**
598 * Prematurely release a guard.
599 */
600 void release(void);
601
        /**
         * Set guard to write lock a new object. If a lock is currently
         * held, it is released.
         * @param pointer to object to guard.
         */
607 inline void operator=(const void *pointer)
608 {set(pointer);};
609 };
610
611 /**
612 * Create an instance of a rwlock.
613 */
614 ThreadLock();
615
616 /**
617 * Request modify (write) access through the lock.
618 * @param timeout in milliseconds to wait for lock.
619 * @return true if locked, false if timeout.
620 */
621 bool modify(timeout_t timeout = Timer::inf);
622
623 /**
624 * Request shared (read) access through the lock.
625 * @param timeout in milliseconds to wait for lock.
626 * @return true if locked, false if timeout.
627 */
628 bool access(timeout_t timeout = Timer::inf);
629
630 /**
631 * Specify hash table size for guard protection. The default is 1.
632 * This should be called at initialization time from the main thread
633 * of the application before any other threads are created.
634 * @param size of hash table used for guarding.
635 */
636 static void indexing(unsigned size);
637
638 /**
639 * Write protect access to an arbitrary object. This is like the
640 * protect function of mutex.
641 * @param object to protect.
642 * @param timeout in milliseconds to wait for lock.
643 * @return true if locked, false if timeout.
644 */
645 static bool writer(const void *object, timeout_t timeout = Timer::inf);
646
647 /**
648 * Shared access to an arbitrary object. This is based on the protect
649 * function of mutex.
650 * @param object to share.
651 * @param timeout in milliseconds to wait for lock.
652 * @return true if shared, false if timeout.
653 */
654 static bool reader(const void *object, timeout_t timeout = Timer::inf);
655
656 /**
657 * Release an arbitrary object that has been protected by a rwlock.
658 * @param object to release.
659 */
660 static void release(const void *object);
661
662 /**
663 * Release the lock.
664 */
665 void release(void);
666};
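
/**
 * Usage sketch (illustrative only): shared readers use access() and
 * exclusive writers use modify(), both released with release(). The
 * timeout value shown is arbitrary.
 * @code
 * static ThreadLock db_lock;
 *
 * void reader_thread(void)
 * {
 *     if(db_lock.access(500)) {    // shared read access, 500ms timeout
 *         // ...read shared state...
 *         db_lock.release();
 *     }
 * }
 *
 * void writer_thread(void)
 * {
 *     if(db_lock.modify()) {       // exclusive write access, waits forever
 *         // ...change shared state...
 *         db_lock.release();
 *     }
 * }
 * @endcode
 */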
667
/**
 * Class for resource bound memory pools between threads. This is used to
 * support a memory pool allocation scheme where a pool of reusable objects
 * may be allocated, and the pool renewed by releasing objects back to it.
 * When the pool is used up, a pool consuming thread must then wait for
 * a resource to be freed by another consumer (or timeout). This class is
 * not meant to be used directly, but rather to build the synchronizing
 * control between consumers which might be forced to wait for a resource.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
678class __EXPORT ReusableAllocator : protected Conditional
679{
680protected:
681 ReusableObject *freelist;
682 unsigned waiting;
683
684 /**
685 * Initialize reusable allocator through a conditional. Zero free list.
686 */
687 ReusableAllocator();
688
689 /**
690 * Get next reusable object in the pool.
691 * @param object from list.
692 * @return next object.
693 */
694 inline ReusableObject *next(ReusableObject *object)
695 {return object->getNext();};
696
    /**
     * Release a reusable object back into the pool.
     * @param object being released.
     */
701 void release(ReusableObject *object);
702};
703
/**
 * An optimized and convertible shared lock. This is a form of read/write
 * lock that has been optimized, particularly for shared access. Support
 * for scheduling access around writer starvation is also included. The
 * other benefit over traditional read/write locks is that the code is
 * a little lighter, and read (shared) locks can be converted to exclusive
 * (write) locks to perform brief modify operations and then returned to read
 * locks, rather than having to release and re-acquire locks to change mode.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
714class __EXPORT ConditionalLock : protected ConditionalAccess, public SharedAccess
715{
716protected:
717 class Context : public LinkedObject
718 {
719 public:
720 inline Context(LinkedObject **root) : LinkedObject(root) {};
721
722 pthread_t thread;
723 unsigned count;
724 };
725
726 LinkedObject *contexts;
727
728 virtual void _share(void);
729 virtual void _unlock(void);
730
731 Context *getContext(void);
732
733public:
734 /**
735 * Construct conditional lock for default concurrency.
736 */
737 ConditionalLock();
738
739 /**
740 * Destroy conditional lock.
741 */
742 ~ConditionalLock();
743
744 /**
745 * Acquire write (exclusive modify) lock.
746 */
747 void modify(void);
748
749 /**
750 * Commit changes / release a modify lock.
751 */
752 void commit(void);
753
754 /**
755 * Acquire access (shared read) lock.
756 */
757 void access(void);
758
759 /**
760 * Release a shared lock.
761 */
762 void release(void);
763
    /**
     * Convert read lock into exclusive (write/modify) access. Access is
     * scheduled when other readers are still sharing.
     */
768 virtual void exclusive(void);
769
770 /**
771 * Return an exclusive access lock back to share mode.
772 */
773 virtual void share(void);
774};
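
/**
 * Usage sketch (illustrative only): a shared access lock can be promoted
 * with exclusive() for a brief modification and demoted again with share()
 * before release(). The cache_is_stale() predicate is hypothetical.
 * @code
 * static ConditionalLock cache_lock;
 *
 * void refresh_if_stale(void)
 * {
 *     cache_lock.access();         // shared read access
 *     if(cache_is_stale()) {
 *         cache_lock.exclusive();  // promote to exclusive access
 *         // ...briefly modify the cache...
 *         cache_lock.share();      // demote back to shared access
 *     }
 *     cache_lock.release();
 * }
 * @endcode
 */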
775
/**
 * A portable implementation of "barrier" thread synchronization. A barrier
 * waits until a specified number of threads have all reached the barrier,
 * and then releases all the threads together. This implementation works
 * regardless of whether the thread library supports barriers since it is
 * built from conditional. It also differs in that the number of threads
 * required can be changed dynamically at runtime, unlike pthread barriers
 * which, when supported, have a fixed limit defined at creation time. Since
 * we use conditionals, another feature we can add is optional support for a
 * wait with timeout.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
788class __EXPORT barrier : private Conditional
789{
790private:
791 unsigned count;
792 unsigned waits;
793
794public:
795 /**
796 * Construct a barrier with an initial size.
797 * @param count of threads required.
798 */
799 barrier(unsigned count);
800
801 /**
802 * Destroy barrier and release pending threads.
803 */
804 ~barrier();
805
806 /**
807 * Dynamically alter the number of threads required. If the size is
808 * set below the currently waiting threads, then the barrier releases.
809 * @param count of threads required.
810 */
811 void set(unsigned count);
812
813 /**
814 * Dynamically increment the number of threads required.
815 */
816 void inc(void);
817
818 /**
819 * Reduce the number of threads required.
820 */
821 void dec(void);
822
    /**
     * Alternative prefix form of the increment operation.
     * @return the current count of threads.
     */
827 unsigned operator++(void);
828
829 unsigned operator--(void);
830
831 /**
832 * Wait at the barrier until the count of threads waiting is reached.
833 */
834 void wait(void);
835
836 /**
837 * Wait at the barrier until either the count of threads waiting is
838 * reached or a timeout has occurred.
839 * @param timeout to wait in milliseconds.
840 * @return true if barrier reached, false if timer expired.
841 */
842 bool wait(timeout_t timeout);
843};
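
/**
 * Usage sketch (illustrative only): a fixed number of worker threads
 * rendezvous between phases of a computation.
 * @code
 * static barrier rendezvous(3);    // three threads must arrive
 *
 * void worker_step(void)
 * {
 *     // ...phase one work...
 *     rendezvous.wait();           // blocks until all three threads arrive
 *     // ...phase two work, started together...
 * }
 * @endcode
 */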
844
/**
 * A portable counting semaphore class. A semaphore will allow threads
 * to pass through it until the count is reached, and blocks further threads.
 * Unlike a pthread semaphore, our semaphore class allows its count limit
 * to be altered during runtime and supports timed waits. This class also
 * implements the shared_lock protocol.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
853class __EXPORT Semaphore : public SharedAccess, protected Conditional
854{
855protected:
856 unsigned count, waits, used;
857
858 virtual void _share(void);
859 virtual void _unlock(void);
860
861public:
862 /**
863 * Construct a semaphore with an initial count of threads to permit.
864 */
865 Semaphore(unsigned count = 0);
866
    /**
     * Wait until the semaphore usage count is less than the thread limit.
     * Increase used count for our thread when unblocked.
     */
    void wait(void);

    /**
     * Wait until the semaphore usage count is less than the thread limit.
     * Increase used count for our thread when unblocked, or return without
     * changing if timed out.
     * @param timeout to wait in milliseconds.
     * @return true if success, false if timeout.
     */
    bool wait(timeout_t timeout);
881
    /**
     * Alter the semaphore limit at runtime.
     * @param count of threads to allow.
     */
886 void set(unsigned count);
887
888 /**
889 * Release the semaphore after waiting for it.
890 */
891 void release(void);
892
893 /**
894 * Convenience operator to wait on a counting semaphore.
895 */
896 inline void operator++(void)
897 {wait();};
898
899 /**
900 * Convenience operator to release a counting semaphore.
901 */
902 inline void operator--(void)
903 {release();};
904};
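
/**
 * Usage sketch (illustrative only): bound the number of threads inside a
 * section of code or using a pooled resource.
 * @code
 * static Semaphore slots(4);       // allow at most four concurrent users
 *
 * void use_limited_resource(void)
 * {
 *     slots.wait();                // blocks while four threads are inside
 *     // ...use the resource...
 *     slots.release();
 * }
 * @endcode
 */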
905
906/**
 * Generic non-recursive exclusive lock class. This class also implements
908 * the exclusive_lock protocol. In addition, an interface is offered to
909 * support dynamically managed mutexes which are internally pooled. These
910 * can be used to protect and serialize arbitrary access to memory and
911 * objects on demand. This offers an advantage over embedding mutexes to
912 * serialize access to individual objects since the maximum number of
913 * mutexes will never be greater than the number of actually running threads
914 * rather than the number of objects being potentially protected. The
915 * ability to hash the pointer address into an indexed table further optimizes
916 * access by reducing the chance for collisions on the primary index mutex.
917 * @author David Sugar <dyfet@gnutelephony.org>
918 */
919class __EXPORT Mutex : public ExclusiveAccess
920{
921protected:
922 pthread_mutex_t mlock;
923
924 virtual void _lock(void);
925 virtual void _unlock(void);
926
927public:
928 /**
929 * Guard class to apply scope based mutex locking to objects. The mutex
930 * is located from the mutex pool rather than contained in the target
931 * object, and the lock is released when the guard object falls out of
932 * scope. This is essentially an automation mechanism for mutex::protect.
933 * @author David Sugar <dyfet@gnutelephony.org>
934 */
935 class __EXPORT guard
936 {
937 private:
938 const void *object;
939
940 public:
        /**
         * Create an uninitialized instance of guard. Usually used with a
         * guard = operator.
         */
945 guard();
946
947 /**
948 * Construct a guard for a specific object.
949 * @param object to guard.
950 */
951 guard(const void *object);
952
953 /**
954 * Release mutex when guard falls out of scope.
955 */
956 ~guard();
957
958 /**
959 * Set guard to mutex lock a new object. If a lock is currently
960 * held, it is released.
961 * @param object to guard.
962 */
963 void set(const void *object);
964
965 /**
966 * Prematurely release a guard.
967 */
968 void release(void);
969
970 /**
971 * Set guard to mutex lock a new object. If a lock is currently
972 * held, it is released.
973 * @param pointer to object to guard.
974 */
975 inline void operator=(void *pointer)
976 {set(pointer);};
977 };
978
979
980 /**
981 * Create a mutex lock.
982 */
983 Mutex();
984
985 /**
986 * Destroy mutex lock, release waiting threads.
987 */
988 ~Mutex();
989
990 /**
991 * Acquire mutex lock. This is a blocking operation.
992 */
993 inline void acquire(void)
994 {pthread_mutex_lock(&mlock);};
995
996 /**
997 * Acquire mutex lock. This is a blocking operation.
998 */
999 inline void lock(void)
1000 {pthread_mutex_lock(&mlock);};
1001
1002 /**
1003 * Release acquired lock.
1004 */
1005 inline void unlock(void)
1006 {pthread_mutex_unlock(&mlock);};
1007
1008 /**
1009 * Release acquired lock.
1010 */
1011 inline void release(void)
1012 {pthread_mutex_unlock(&mlock);};
1013
1014 /**
1015 * Convenience function to acquire os native mutex lock directly.
1016 * @param lock to acquire.
1017 */
1018 inline static void acquire(pthread_mutex_t *lock)
1019 {pthread_mutex_lock(lock);};
1020
1021 /**
1022 * Convenience function to release os native mutex lock directly.
1023 * @param lock to release.
1024 */
1025 inline static void release(pthread_mutex_t *lock)
1026 {pthread_mutex_unlock(lock);};
1027
1028 /**
1029 * Specify hash table size for guard protection. The default is 1.
1030 * This should be called at initialization time from the main thread
1031 * of the application before any other threads are created.
1032 * @param size of hash table used for guarding.
1033 */
1034 static void indexing(unsigned size);
1035
1036 /**
1037 * Specify pointer/object/resource to guard protect. This uses a
1038 * dynamically managed mutex.
1039 * @param pointer to protect.
1040 */
1041 static void protect(const void *pointer);
1042
1043 /**
1044 * Specify a pointer/object/resource to release.
1045 * @param pointer to release.
1046 */
1047 static void release(const void *pointer);
1048};
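
/**
 * Usage sketch (illustrative only): a Mutex member can be locked directly,
 * or an arbitrary address can be protected through the internal mutex pool
 * with the scope-based guard. The counter variable is hypothetical.
 * @code
 * static Mutex reglock;
 * static unsigned counter = 0;
 *
 * void bump_direct(void)
 * {
 *     reglock.acquire();
 *     ++counter;
 *     reglock.release();
 * }
 *
 * void bump_guarded(void)
 * {
 *     Mutex::guard lock(&counter); // pooled mutex keyed by address
 *     ++counter;
 * }                                // guard releases the pooled mutex here
 * @endcode
 */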
1049
1050/**
1051 * A mutex locked object smart pointer helper class. This is particularly
1052 * useful in referencing objects which will be protected by the mutex
1053 * protect function. When the pointer falls out of scope, the protecting
1054 * mutex is also released. This is meant to be used by the typed
1055 * mutex_pointer template.
1056 * @author David Sugar <dyfet@gnutelephony.org>
1057 */
1058class __EXPORT auto_protect
1059{
1060private:
    // cannot copy...
    inline auto_protect(const auto_protect &pointer) {};
1063
1064protected:
1065 const void *object;
1066
1067 auto_protect();
1068
1069public:
1070 /**
1071 * Construct a protected pointer referencing an existing object.
1072 * @param object we point to.
1073 */
1074 auto_protect(const void *object);
1075
1076 /**
1077 * Delete protected pointer. When it falls out of scope the associated
1078 * mutex is released.
1079 */
1080 ~auto_protect();
1081
1082 /**
1083 * Manually release the pointer. This releases the mutex.
1084 */
1085 void release(void);
1086
1087 /**
1088 * Test if the pointer is not set.
1089 * @return true if the pointer is not referencing anything.
1090 */
1091 inline bool operator!() const
1092 {return object == NULL;};
1093
1094 /**
1095 * Test if the pointer is referencing an object.
1096 * @return true if the pointer is currently referencing an object.
1097 */
1098 inline operator bool() const
1099 {return object != NULL;};
1100
1101 /**
1102 * Set our pointer to a specific object. If the pointer currently
1103 * references another object, the associated mutex is released. The
1104 * pointer references our new object and that new object is locked.
1105 * @param object to assign to.
1106 */
1107 void operator=(const void *object);
1108};
1109
/**
 * An object pointer that uses a mutex to assure thread-safe singleton use.
 * This class is used to support a thread-safe replaceable pointer to an
 * object. It is used to form and support the templated locked_pointer class
 * and is used with the locked_release class. An example of where this might
 * be used is in config file parsers, where a separate thread may process and
 * generate a new config object for new threads to reference, while the old
 * configuration continues to be used by a reference counted instance that
 * goes away when it falls out of scope.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
1121class __EXPORT LockedPointer
1122{
1123private:
1124 friend class locked_release;
1125 pthread_mutex_t mutex;
1126 ObjectProtocol *pointer;
1127
1128protected:
1129 /**
1130 * Create an instance of a locked pointer.
1131 */
1132 LockedPointer();
1133
1134 /**
1135 * Replace existing object with a new one for next request.
1136 * @param object to register with pointer.
1137 */
1138 void replace(ObjectProtocol *object);
1139
1140 /**
1141 * Create a duplicate reference counted instance of the current object.
1142 * @return duplicate reference counted object.
1143 */
1144 ObjectProtocol *dup(void);
1145
1146 /**
1147 * Replace existing object through assignment.
1148 * @param object to assign.
1149 */
1150 inline void operator=(ObjectProtocol *object)
1151 {replace(object);};
1152};
1153
/**
 * Shared singleton object. A shared singleton object is a special kind of
 * object that may be shared by multiple threads but of which only one active
 * instance is allowed to exist. The shared object is managed by the
 * templated shared pointer class, and is meant to be inherited as a base
 * class for the derived shared singleton type.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
1162class __EXPORT SharedObject
1163{
1164protected:
1165 friend class SharedPointer;
1166
1167 /**
1168 * Commit is called when a shared singleton is accepted and replaces
1169 * a prior instance managed by a shared pointer. Commit occurs
1170 * when replace is called on the shared pointer, and is assured to
1171 * happen only when no threads are accessing either the current
1172 * or the prior instance that was previously protected by the pointer.
1173 * @param pointer that now holds the object.
1174 */
1175 virtual void commit(SharedPointer *pointer);
1176
1177public:
1178 /**
1179 * Allows inherited virtual.
1180 */
1181 virtual ~SharedObject();
1182};
1183
1184/**
1185 * The shared pointer is used to manage a singleton instance of shared object.
1186 * This class is used to support the templated shared_pointer class and the
1187 * shared_release class, and is not meant to be used directly or as a base
 * for anything else. One or more threads may acquire a shared lock to the
1189 * singleton object through this pointer, and it can only be replaced with a
1190 * new singleton instance when no threads reference it. The conditional lock
1191 * is used to manage shared access for use and exclusive access when modified.
1192 * @author David Sugar <dyfet@gnutelephony.org>
1193 */
1194class __EXPORT SharedPointer : protected ConditionalAccess
1195{
1196private:
1197 friend class shared_release;
1198 SharedObject *pointer;
1199
1200protected:
1201 /**
1202 * Created shared locking for pointer. Must be assigned by replace.
1203 */
1204 SharedPointer();
1205
1206 /**
1207 * Destroy lock and release any blocked threads.
1208 */
1209 ~SharedPointer();
1210
1211 /**
1212 * Replace existing singleton instance with new one. This happens
1213 * during exclusive locking, and the commit method of the object
1214 * will be called.
1215 * @param object being set.
1216 */
1217 void replace(SharedObject *object);
1218
1219 /**
1220 * Acquire a shared reference to the singleton object. This is a
1221 * form of shared access lock. Derived classes and templates access
1222 * "release" when the shared pointer is no longer needed.
1223 * @return shared object.
1224 */
1225 SharedObject *share(void);
1226};
1227
1228/**
1229 * An abstract class for defining classes that operate as a thread. A derived
1230 * thread class has a run method that is invoked with the newly created
1231 * thread context, and can use the derived object to store all member data
1232 * that needs to be associated with that context. This means the derived
1233 * object can safely hold thread-specific data that is managed with the life
1234 * of the object, rather than having to use the clumsy thread-specific data
1235 * management and access functions found in thread support libraries.
1236 * @author David Sugar <dyfet@gnutelephony.org>
1237 */
1238class __EXPORT Thread
1239{
1240protected:
1241// may be used in future if we need cancelable threads...
1242#ifdef _MSWINDOWS_
1243 HANDLE cancellor;
1244#else
1245 void *cancellor;
1246#endif
1247
1248 enum {} reserved; // cancel mode?
1249 pthread_t tid;
1250 size_t stack;
1251 int priority;
1252
1253 /**
1254 * Create a thread object that will have a preset stack size. If 0
1255 * is used, then the stack size is os defined/default.
1256 * @param stack size to use or 0 for default.
1257 */
1258 Thread(size_t stack = 0);
1259
1260 /**
1261 * Map thread for get method. This should be called from start of the
1262 * run() method of a derived class.
1263 */
1264 void map(void);
1265
1266 /**
1267 * Check if running.
1268 */
1269 virtual bool is_active(void);
1270
1271public:
1272 /**
1273 * Set thread priority without disrupting scheduling if possible.
1274 * Based on scheduling policy. It is recommended that the process
1275 * is set for realtime scheduling, and this method is actually for
1276 * internal use.
1277 */
1278 void setPriority(void);
1279
1280 /**
1281 * Yield execution context of the current thread. This is a static
1282 * and may be used anywhere.
1283 */
1284 static void yield(void);
1285
1286 /**
1287 * Sleep current thread for a specified time period.
1288 * @param timeout to sleep for in milliseconds.
1289 */
1290 static void sleep(timeout_t timeout);
1291
1292 /**
1293 * Get mapped thread object. This returns the mapped base class of the
1294 * thread object of the current executing context. You will need to
1295 * cast to the correct derived class to access derived thread-specific
1296 * storage. If the current thread context is not mapped NULL is returned.
1297 */
1298 static Thread *get(void);
1299
1300 /**
1301 * Abstract interface for thread context run method.
1302 */
1303 virtual void run(void) = 0;
1304
1305 /**
1306 * Destroy thread object, thread-specific data, and execution context.
1307 */
1308 virtual ~Thread();
1309
1310 /**
1311 * Exit the thread context. This function should NO LONGER be called
1312 * directly to exit a running thread. Instead this method will only be
1313 * used to modify the behavior of the thread context at thread exit,
1314 * including detached threads which by default delete themselves. This
1315 * documented usage was changed to support Mozilla NSPR exit behavior
1316 * in case we support NSPR as an alternate thread runtime in the future.
1317 */
1318 virtual void exit(void);
1319
1320 /**
1321 * Used to initialize threading library. May be needed for some platforms.
1322 */
1323 static void init(void);
1324
1325 /**
1326 * Used to specify scheduling policy for threads above priority "0".
1327 * Normally we apply static realtime policy SCHED_FIFO (default) or
1328 * SCHED_RR. However, we could apply SCHED_OTHER, etc.
1329 */
1330 static void policy(int polid);
1331
1332 /**
1333 * Set concurrency level of process. This is essentially a portable
1334 * wrapper for pthread_setconcurrency.
1335 */
1336 static void concurrency(int level);
1337
1338 /**
1339 * Determine if two thread identifiers refer to the same thread.
1340 * @param thread1 to test.
1341 * @param thread2 to test.
1342 * @return true if both are the same context.
1343 */
1344 static bool equal(pthread_t thread1, pthread_t thread2);
1345
1346 /**
1347 * Get current thread id.
1348 * @return thread id.
1349 */
1350 static pthread_t self(void);
1351
1352 inline operator bool()
1353 {return is_active();}
1354
1355 inline bool operator!()
1356 {return !is_active();}
1357
1358 inline bool isRunning(void)
1359 {return is_active();}
1360};
1361
1362/**
1363 * A child thread object that may be joined by parent. A child thread is
1364 * a type of thread in which the parent thread (or process main thread) can
1365 * then wait for the child thread to complete and then delete the child object.
1366 * The parent thread can wait for the child thread to complete either by
1367 * calling join, or performing a "delete" of the derived child object. In
1368 * either case the parent thread will suspend execution until the child thread
1369 * exits.
1370 * @author David Sugar <dyfet@gnutelephony.org>
1371 */
1372class __EXPORT JoinableThread : public Thread
1373{
1374protected:
1375#ifdef _MSWINDOWS_
1376 HANDLE running;
1377#else
1378 volatile bool running;
1379#endif
1380 volatile bool joining;
1381
1382 /**
1383 * Create a joinable thread with a known context stack size.
1384 * @param size of stack for thread context or 0 for default.
1385 */
1386 JoinableThread(size_t size = 0);
1387
1388 /**
1389 * Delete child thread. Parent thread suspends until child thread
1390 * run method completes or child thread calls it's exit method.
1391 */
1392 virtual ~JoinableThread();
1393
    /**
     * Join thread with parent. Calling this from a child thread to exit is
     * now deprecated behavior and in the future will not be supported.
     * Threads should always return through their run() method.
     */
1399 void join(void);
1400
1401 bool is_active(void);
1402
1403 virtual void run(void) = 0;
1404
1405public:
1406
1407 /**
1408 * Start execution of child context. This must be called after the
1409 * child object is created (perhaps with "new") and before it can be
1410 * joined. This method actually begins the new thread context, which
1411 * then calls the object's run method. Optionally raise the priority
1412 * of the thread when it starts under realtime priority.
1413 * @param priority of child thread.
1414 */
1415 void start(int priority = 0);
1416
1417 /**
1418 * Start execution of child context as background thread. This is
1419 * assumed to be off main thread, with a priority lowered by one.
1420 */
1421 inline void background(void)
1422 {start(-1);};
1423};
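
/**
 * Usage sketch (illustrative only): derive a worker, start() it, and delete
 * the object to join with the thread. The Worker class is hypothetical.
 * @code
 * class Worker : public JoinableThread
 * {
 * private:
 *     unsigned count;
 *
 * public:
 *     Worker() : JoinableThread(), count(0) {};
 *     ~Worker() {};                // deleting the object joins the thread
 *
 *     void run(void) {
 *         for(count = 0; count < 100; ++count)
 *             Thread::sleep(10);
 *     };
 * };
 *
 * // Worker *worker = new Worker();
 * // worker->start();              // begin thread context; calls run()
 * // delete worker;                // parent blocks until run() returns
 * @endcode
 */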
1424
/**
 * A detached thread object that is stand-alone. This object has no
 * relationship with any other running thread instance and will be
 * automatically deleted when the running thread instance exits, either by
 * its run method exiting, or by explicitly calling the exit member function.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
1432class __EXPORT DetachedThread : public Thread
1433{
1434protected:
1435 bool active;
1436
1437 /**
1438 * Create a detached thread with a known context stack size.
1439 * @param size of stack for thread context or 0 for default.
1440 */
1441 DetachedThread(size_t size = 0);
1442
1443 /**
1444 * Destroys object when thread context exits. Never externally
1445 * deleted. Derived object may also have destructor to clean up
1446 * thread-specific member data.
1447 */
1448 ~DetachedThread();
1449
1450 /**
1451 * Exit context of detached thread. Thread object will be deleted.
1452 * This function should NO LONGER be called directly to exit a running
1453 * thread. Instead, the thread should only "return" through the run()
1454 * method to exit. The documented usage was changed so that exit() can
1455 * still be used to modify the "delete this" behavior of detached threads
1456 * while merging thread exit behavior with Mozilla NSPR.
1457 */
1458 void exit(void);
1459
1460 bool is_active(void);
1461
1462 virtual void run(void) = 0;
1463
1464public:
1465 /**
1466 * Start execution of detached context. This must be called after the
1467 * object is created (perhaps with "new"). This method actually begins
1468 * the new thread context, which then calls the object's run method.
1469 * @param priority to start thread with.
1470 */
1471 void start(int priority = 0);
1472};
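
/**
 * Usage sketch (illustrative only): a detached service thread is created
 * with new and cleans itself up when run() returns. The Monitor class and
 * keep_running() predicate are hypothetical.
 * @code
 * class Monitor : public DetachedThread
 * {
 * public:
 *     Monitor() : DetachedThread() {};
 *
 * protected:
 *     void run(void) {
 *         while(keep_running())
 *             Thread::sleep(1000);
 *     };                           // returning deletes this detached object
 * };
 *
 * // start(new Monitor());         // never delete a detached thread directly
 * @endcode
 */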
1473
/**
 * Auto-pointer support class for locked objects. This is used as a base
 * class for the templated locked_instance class that uses the managed
 * LockedPointer to assign a reference to an object. When the locked
 * instance falls out of scope, the object is dereferenced. Ideally the
 * pointer typed object should be based on the reference counted object class.
 * @author David Sugar <dyfet@gnutelephony.org>
 */
1482class __EXPORT locked_release
1483{
1484protected:
1485 ObjectProtocol *object; /**< locked object protected by locked_release */
1486
1487 /**
1488 * Create an unassigned locked object pointer base.
1489 */
1490 locked_release();
1491
1492 /**
1493 * Construct a locked object instance base from an existing instance. This
1494 * will create a duplicate (retained) reference.
1495 * @param object to copy from.
1496 */
1497 locked_release(const locked_release &object);
1498
1499public:
1500 /**
1501 * Construct a locked object instance base from a LockedPointer. References
1502 * a retained instance of the underlying object from the LockedPointer.
1503 * @param pointer of locked pointer to assign from.
1504 */
1505 locked_release(LockedPointer &pointer);
1506
1507 /**
1508 * Auto-release pointer to locked object instance. This is used to release
1509 * a reference when the pointer template falls out of scope.
1510 */
1511 ~locked_release();
1512
1513 /**
1514 * Manually release the object reference.
1515 */
1516 void release(void);
1517
1518 /**
1519 * Assign a locked object pointer. If an existing object is already
1520 * assigned, the existing pointer is released.
1521 * @param pointer reference through locked object.
1522 */
1523 locked_release &operator=(LockedPointer &pointer);
1524};
1525
1526/**
1527 * Auto-pointer support class for shared singleton objects. This is used as
1528 * a base class for the templated shared_instance class that uses shared
1529 * access locking through the SharedPointer class. When the shared instance
1530 * falls out of scope, the SharedPointer lock is released. The pointer
1531 * typed object must be based on the SharedObject class.
1532 * @author David Sugar <dyfet@gnutelephony.org>
1533 */
1534
1535class __EXPORT shared_release
1536{
1537protected:
1538 SharedPointer *ptr; /**< Shared lock for protected singleton */
1539
1540 /**
1541 * Create an unassigned shared singleton object pointer base.
1542 */
1543 shared_release();
1544
1545 /**
1546 * Construct a shared object instance base from an existing instance. This
1547 * will assign an additional shared lock.
1548 * @param object to copy from.
1549 */
1550 shared_release(const shared_release &object);
1551
1552public:
1553 /**
1554 * Access lock a shared singleton instance from a SharedPointer.
1555 * @param pointer of shared pointer to assign from.
1556 */
1557 shared_release(SharedPointer &pointer);
1558
1559 /**
1560 * Auto-unlock shared lock for singleton instance protected by shared
1561 * pointer. This is used to unlock when the instance template falls out
1562 * of scope.
1563 */
1564 ~shared_release();
1565
1566 /**
1567 * Manually release access to shared singleton object.
1568 */
1569 void release(void);
1570
1571 /**
1572 * Get pointer to singleton object that we have shared lock for.
1573 * @return shared object singleton.
1574 */
1575 SharedObject *get(void);
1576
1577 /**
1578 * Assign shared lock access to shared singleton. If an existing
1579 * shared lock is held for another pointer, it is released.
1580 * @param pointer access for shared object.
1581 */
1582 shared_release &operator=(SharedPointer &pointer);
1583};
1584
1585/**
1586 * Templated shared pointer for singleton shared objects of specific type.
1587 * This is used as typed template for the SharedPointer object reference
1588 * management class. This is used to supply a typed singleton shared
1589 * instance to the typed shared_instance template class.
1590 * @author David Sugar <dyfet@gnutelephony.org>
1591 */
1592template<class T>
1593class shared_pointer : public SharedPointer
1594{
1595public:
1596 /**
1597 * Created shared locking for typed singleton pointer.
1598 */
1599 inline shared_pointer() : SharedPointer() {};
1600
    /**
     * Acquire a shared (duplicate) reference to the typed singleton object.
     * This is a form of shared access lock. Derived classes and templates
     * call "release" when the shared pointer is no longer needed.
     * @return typed shared object.
     */
1608 inline const T *dup(void)
1609 {return static_cast<const T*>(SharedPointer::share());};
1610
1611 /**
1612 * Replace existing typed singleton instance with new one. This happens
1613 * during exclusive locking, and the commit method of the typed object
1614 * will be called.
1615 * @param object being set.
1616 */
1617 inline void replace(T *object)
1618 {SharedPointer::replace(object);};
1619
1620 /**
1621 * Replace existing typed singleton object through assignment.
1622 * @param object to assign.
1623 */
1624 inline void operator=(T *object)
1625 {replace(object);};
1626
1627 /**
1628 * Access shared lock typed singleton object by pointer reference.
1629 * @return typed shared object.
1630 */
    inline const T *operator*()
        {return dup();};
1633};
1634
1635/**
1636 * Templated locked pointer for referencing locked objects of specific type.
1637 * This is used as typed template for the LockedPointer object reference
1638 * management class. This is used to supply a typed locked instances
1639 * to the typed locked_instance template class.
1640 * @author David Sugar <dyfet@gnutelephony.org>
1641 */
1642template<class T>
1643class locked_pointer : public LockedPointer
1644{
1645public:
1646 /**
1647 * Create an instance of a typed locked pointer.
1648 */
1649 inline locked_pointer() : LockedPointer() {};
1650
1651 /**
1652 * Create a duplicate reference counted instance of the current typed
1653 * object.
1654 * @return duplicate reference counted typed object.
1655 */
1656 inline T* dup(void)
1657 {return static_cast<T *>(LockedPointer::dup());};
1658
1659 /**
1660 * Replace existing typed object with a new one for next request.
1661 * @param object to register with pointer.
1662 */
1663 inline void replace(T *object)
1664 {LockedPointer::replace(object);};
1665
1666 /**
1667 * Replace existing object through assignment.
1668 * @param object to assign.
1669 */
1670 inline void operator=(T *object)
1671 {replace(object);};
1672
1673 /**
1674 * Create a duplicate reference counted instance of the current typed
1675 * object by pointer reference.
1676 * @return duplicate reference counted typed object.
1677 */
1678 inline T *operator*()
1679 {return dup();};
1680};
1681
1682/**
1683 * A templated smart pointer instance for lock protected objects.
1684 * This is used to reference an instance of a typed locked_pointer.
1685 * @author David Sugar <dyfet@gnutelephony.org>
1686 */
1687template<class T>
1688class locked_instance : public locked_release
1689{
1690public:
1691 /**
1692 * Construct empty locked instance of typed object.
1693 */
1694 inline locked_instance() : locked_release() {};
1695
1696 /**
1697 * Construct locked instance of typed object from matching locked_pointer.
1698 * @param pointer to get instance from.
1699 */
1700 inline locked_instance(locked_pointer<T> &pointer) : locked_release(pointer) {};
1701
1702 /**
1703 * Extract instance of locked typed object by pointer reference.
1704 * @return instance of typed object.
1705 */
    inline T& operator*() const
        {return *(static_cast<T*>(object));};
1708
1709 /**
1710 * Access member of instance of locked typed object by member reference.
1711 * @return instance of typed object.
1712 */
1713 inline T* operator->() const
1714 {return static_cast<T*>(object);};
1715
1716 /**
1717 * Get pointer to instance of locked typed object.
1718 * @return instance of typed object.
1719 */
1720 inline T* get(void) const
1721 {return static_cast<T*>(object);};
1722};
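
/**
 * Usage sketch (illustrative only): a writer publishes replacement objects
 * through a locked_pointer while readers take retained references through
 * locked_instance. The Config type is hypothetical and assumed to implement
 * the ObjectProtocol reference counting interface.
 * @code
 * static locked_pointer<Config> current_config;
 *
 * void reload(Config *fresh)
 * {
 *     current_config = fresh;      // object handed to later requests
 * }
 *
 * void worker(void)
 * {
 *     locked_instance<Config> config(current_config);  // retained reference
 *     // ...use config->...; reference released when instance leaves scope
 * }
 * @endcode
 */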
1723
1724/**
1725 * A templated smart pointer instance for shared singleton typed objects.
1726 * This is used to access the shared lock instance of the singleton.
1727 * @author David Sugar <dyfet@gnutelephony.org>
1728 */
1729template<class T>
1730class shared_instance : public shared_release
1731{
1732public:
1733 /**
1734 * Construct empty instance to reference shared typed singleton.
1735 */
1736 inline shared_instance() : shared_release() {};
1737
1738 /**
1739 * Construct shared access instance of shared typed singleton from matching
1740 * shared_pointer.
1741 * @param pointer to get instance from.
1742 */
1743 inline shared_instance(shared_pointer<T> &pointer) : shared_release(pointer) {};
1744
1745 /**
1746 * Access shared typed singleton object this instance locks and references.
1747 */
    inline const T& operator*() const
        {return *(static_cast<const T*>(ptr->pointer));};
1750
1751 /**
1752 * Access member of shared typed singleton object this instance locks and
1753 * references.
1754 */
1755 inline const T* operator->() const
1756 {return static_cast<const T*>(ptr->pointer);};
1757
1758 /**
1759 * Access pointer to typed singleton object this instance locks and
1760 * references.
1761 */
1762 inline const T* get(void) const
1763 {return static_cast<const T*>(ptr->pointer);};
1764};
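
/**
 * Usage sketch (illustrative only): a typed shared_pointer manages the
 * active singleton while shared_instance holds a shared access lock for the
 * duration of use. The Settings type is hypothetical and assumed to derive
 * from SharedObject.
 * @code
 * static shared_pointer<Settings> settings;
 *
 * void apply(Settings *replacement)
 * {
 *     settings.replace(replacement);   // waits until no thread holds old one
 * }
 *
 * void worker(void)
 * {
 *     shared_instance<Settings> view(settings);    // shared access lock held
 *     // ...read through view->...; lock released when view leaves scope
 * }
 * @endcode
 */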
1765
1766/**
1767 * Typed smart locked pointer class. This is used to manage references to
1768 * objects which are protected by an auto-generated mutex. The mutex is
1769 * released when the pointer falls out of scope.
1770 * @author David Sugar <dyfet@gnutelephony.org>
1771 */
1772template <class T>
1773class mutex_pointer : public auto_protect
1774{
1775public:
1776 /**
1777 * Create a pointer with no reference.
1778 */
1779 inline mutex_pointer() : auto_protect() {};
1780
1781 /**
1782 * Create a pointer with a reference to a heap object.
1783 * @param object we are referencing.
1784 */
1785 inline mutex_pointer(T* object) : auto_protect(object) {};
1786
1787 /**
1788 * Reference object we are pointing to through pointer indirection.
1789 * @return object we are pointing to.
1790 */
1791 inline T& operator*() const
1792 {return *(static_cast<T&>(auto_protect::object));};
1793
1794 /**
1795 * Reference member of object we are pointing to.
1796 * @return reference to member of pointed object.
1797 */
1798 inline T* operator->() const
1799 {return static_cast<T*>(auto_protect::object);};
1800
1801 /**
1802 * Get pointer to object.
1803 * @return pointer or NULL if we are not referencing an object.
1804 */
1805 inline T* get(void) const
1806 {return static_cast<T*>(auto_protect::object);};
1807};
1808
1809/**
1810 * Convenience function to start a joinable thread.
1811 * @param thread to start.
1812 * @param priority of thread.
1813 */
1814inline void start(JoinableThread *thread, int priority = 0)
1815 {thread->start(priority);}
1816
1817/**
1818 * Convenience function to start a detached thread.
1819 * @param thread to start.
1820 * @param priority of thread.
1821 */
1822inline void start(DetachedThread *thread, int priority = 0)
1823 {thread->start(priority);}
1824
1825/**
1826 * Convenience type for using conditional locks.
1827 */
1828typedef ConditionalLock condlock_t;
1829
1830/**
1831 * Convenience type for scheduling access.
1832 */
1833typedef ConditionalAccess accesslock_t;
1834
1835/**
1836 * Convenience type for using timed events.
1837 */
1838typedef TimedEvent timedevent_t;
1839
1840/**
1841 * Convenience type for using exclusive mutex locks.
1842 */
1843typedef Mutex mutex_t;
1844
1845/**
1846 * Convenience type for using read/write locks.
1847 */
1848typedef ThreadLock rwlock_t;
1849
1850/**
1851 * Convenience type for using recursive exclusive locks.
1852 */
1853typedef RecursiveMutex rexlock_t;
1854
1855/**
1856 * Convenience type for using counting semaphores.
1857 */
1858typedef Semaphore semaphore_t;
1859
1860/**
1861 * Convenience type for using thread barriers.
1862 */
1863typedef barrier barrier_t;
1864
1865/**
1866 * Convenience function to wait on a barrier.
1867 * @param barrier to wait.
1868 */
1869inline void wait(barrier_t &barrier)
1870 {barrier.wait();}
1871
1872/**
1873 * Convenience function to wait on a semaphore.
1874 * @param semaphore to wait on.
1875 * @param timeout to wait for.
1876 */
1877inline void wait(semaphore_t &semaphore, timeout_t timeout = Timer::inf)
1878 {semaphore.wait(timeout);}
1879
1880/**
1881 * Convenience function to release a semaphore.
1882 * @param semaphore to release.
1883 */
1884inline void release(semaphore_t &semaphore)
1885 {semaphore.release();}
1886
1887/**
1888 * Convenience function to acquire a mutex.
1889 * @param mutex to acquire.
1890 */
1891inline void acquire(mutex_t &mutex)
1892 {mutex.lock();}
1893
1894/**
1895 * Convenience function to release a mutex.
1896 * @param mutex to release.
1897 */
1898inline void release(mutex_t &mutex)
1899 {mutex.release();}
1900
1901/**
1902 * Convenience function to exclusively schedule conditional access.
1903 * @param lock to make exclusive.
1904 */
1905inline void modify(accesslock_t &lock)
1906 {lock.modify();}
1907
1908/**
 * Convenience function to schedule shared (read) conditional access.
1910 * @param lock to access shared.
1911 */
1912inline void access(accesslock_t &lock)
1913 {lock.access();}
1914
1915/**
1916 * Convenience function to release an access lock.
1917 * @param lock to release.
1918 */
1919inline void release(accesslock_t &lock)
1920 {lock.release();}
1921
/**
 * Convenience function to commit an exclusive access lock.
 * @param lock to commit.
 */
1927inline void commit(accesslock_t &lock)
1928 {lock.commit();}
1929
1930/**
1931 * Convenience function to exclusively lock shared conditional lock.
1932 * @param lock to make exclusive.
1933 */
1934inline void exclusive(condlock_t &lock)
1935 {lock.exclusive();}
1936
1937/**
1938 * Convenience function to restore shared access on a conditional lock.
1939 * @param lock to make shared.
1940 */
1941inline void share(condlock_t &lock)
1942 {lock.share();}
1943
1944/**
 * Convenience function to exclusively acquire a conditional lock.
1946 * @param lock to acquire for modify.
1947 */
1948inline void modify(condlock_t &lock)
1949 {lock.modify();}
1950
1951/**
1952 * Convenience function to commit and release an exclusively locked conditional
1953 * lock.
1954 * @param lock to commit.
1955 */
1956inline void commit(condlock_t &lock)
1957 {lock.commit();}
1958
1959/**
1960 * Convenience function for shared access to a conditional lock.
1961 * @param lock to access.
1962 */
1963inline void access(condlock_t &lock)
1964 {lock.access();}
1965
1966/**
1967 * Convenience function to release shared access to a conditional lock.
1968 * @param lock to release.
1969 */
1970inline void release(condlock_t &lock)
1971 {lock.release();}
1972
1973/**
1974 * Convenience function for exclusive write access to a read/write lock.
1975 * @param lock to write lock.
1976 * @param timeout to wait for exclusive locking.
1977 */
1978inline bool exclusive(rwlock_t &lock, timeout_t timeout = Timer::inf)
1979 {return lock.modify(timeout);}
1980
1981/**
1982 * Convenience function for shared read access to a read/write lock.
1983 * @param lock to share read lock.
1984 * @param timeout to wait for shared access.
1985 */
1986inline bool share(rwlock_t &lock, timeout_t timeout = Timer::inf)
1987 {return lock.access(timeout);}
1988
1989/**
1990 * Convenience function to release a shared lock.
1991 * @param lock to release.
1992 */
1993inline void release(rwlock_t &lock)
1994 {lock.release();}
1995
1996/**
1997 * Convenience function to lock a shared recursive mutex lock.
1998 * @param lock to acquire.
1999 */
2000inline void lock(rexlock_t &lock)
2001 {lock.lock();}
2002
2003/**
2004 * Convenience function to release a shared recursive mutex lock.
2005 * @param lock to release.
2006 */
2007inline void release(rexlock_t &lock)
2008 {lock.release();}
2009
2010inline bool _sync_protect_(const void *obj)
2011{
2012 Mutex::protect(obj);
2013 return true;
2014}
2015
2016inline bool _sync_release_(const void *obj)
2017{
2018 Mutex::release(obj);
2019 return false;
2020}
2021
2022inline bool _rw_reader_(const void *obj)
2023{
2024 ThreadLock::reader(obj);
2025 return true;
2026}
2027
2028inline bool _rw_writer_(const void *obj)
2029{
2030 ThreadLock::writer(obj);
2031 return true;
2032}
2033
2034inline bool _rw_release_(const void *obj)
2035{
2036 ThreadLock::release(obj);
2037 return false;
2038}
2039
2040#define ENTER_EXCLUSIVE \
2041 do { static pthread_mutex_t __sync__ = PTHREAD_MUTEX_INITIALIZER; \
2042 pthread_mutex_lock(&__sync__);
2043
2044#define LEAVE_EXCLUSIVE \
2045 pthread_mutex_unlock(&__sync__);} while(0);
2046
2047#define SYNC(obj) for(bool _sync_flag_ = _sync_protect_(obj); _sync_flag_; _sync_flag_ = _sync_release_(obj))
2048
2049#define SHARED(obj) for(bool _sync_flag_ = _rw_reader_(obj); _sync_flag_; _sync_flag_ = _rw_release_(obj))
2050
2051#define EXCLUSIVE(obj) for(bool _sync_flag_ = _rw_writer_(obj); _sync_flag_; _sync_flag_ = _rw_release_(obj))
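
/**
 * Usage sketch (illustrative only): the SYNC, SHARED, and EXCLUSIVE macros
 * wrap a statement block in pooled mutex or rwlock protection keyed by an
 * object address. The table variable is hypothetical.
 * @code
 * static int table[64];
 *
 * void update(unsigned slot, int value)
 * {
 *     SYNC(&table) {               // pooled mutex held for this block
 *         table[slot] = value;
 *     }                            // released when the block exits
 * }
 *
 * void scan(int *copy)
 * {
 *     SHARED(&table) {             // pooled read lock for this block
 *         for(unsigned i = 0; i < 64; ++i)
 *             copy[i] = table[i];
 *     }
 * }
 * @endcode
 */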
2052
2053END_NAMESPACE
2054
2055#endif