#include <cfg/os.h>
#include <cfg/memory.h>

#include <string.h>

#include <sys/types.h>
#include <sys/heap.h>
#include <sys/atom.h>
#include <sys/timer.h>
#include <sys/event.h>
#include <sys/thread.h>
#include <sys/nutdebug.h>

#ifdef NUTDEBUG
#include <sys/osdebug.h>
#endif

#ifdef NUTTRACER
#include <sys/tracer.h>
#endif

#if defined(NUT_CRITICAL_NESTING) && !defined(NUT_CRITICAL_NESTING_STACK)
unsigned int critical_nesting_level;
#endif

#if defined(__linux__) || defined(__APPLE__) || defined(__CYGWIN__)
/* Let the Unix emulation know when a thread yields. */
extern void NutUnixThreadYieldHook(void);
#endif

/*!
 * \brief Currently running thread.
 */
NUTTHREADINFO * runningThread;

/*!
 * \brief Thread that has been killed and waits for its memory to be released.
 */
NUTTHREADINFO * killedThread;

/*!
 * \brief Linked list of all created threads.
 */
NUTTHREADINFO * nutThreadList;

/*!
 * \brief Priority ordered list of all threads that are ready to run.
 */
NUTTHREADINFO * runQueue;

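/*
 * Usage sketch: walking the global thread list, e.g. from a debug shell
 * command. The output routine is application specific; printf is used
 * here only for illustration and requires <stdio.h>.
 *
 *  NUTTHREADINFO *tdp;
 *
 *  for (tdp = nutThreadList; tdp; tdp = tdp->td_next) {
 *      printf("%s: prio %u, state %u\n", tdp->td_name,
 *             (unsigned) tdp->td_priority, (unsigned) tdp->td_state);
 *  }
 */
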
/*!
 * \brief Add a thread to a priority ordered queue.
 *
 * Inserts the thread in front of the first queue member that has a
 * lower priority (higher numerical value). Threads of equal priority
 * are queued behind the existing ones.
 *
 * \param td   Thread to be added to the queue.
 * \param tqpp Pointer to the queue's root pointer.
 */
void NutThreadAddPriQueue(NUTTHREADINFO * td, NUTTHREADINFO * volatile *tqpp)
{
    NUTTHREADINFO *tqp;

    NUTASSERT(td != NULL);

    td->td_queue = (HANDLE) tqpp;
    td->td_qpec = 0;

    /*
     * Interrupt routines may post events to this queue, so the root
     * pointer is read within a critical section.
     */
    NutEnterCritical();
    tqp = *tqpp;

    if (tqp == SIGNALED) {
        /* An event was posted to the empty queue. Remember it. */
        tqp = 0;
        td->td_qpec++;
    } else if (tqp) {
        NutExitCritical();

        /* Find the insertion point with interrupts enabled. */
        while (tqp && tqp->td_priority <= td->td_priority) {
            tqpp = &tqp->td_qnxt;
            tqp = tqp->td_qnxt;
        }

        NutEnterCritical();
    }

    td->td_qnxt = tqp;

    *tqpp = td;
    /* Take over any event post count pending at the new successor. */
    if (td->td_qnxt && td->td_qnxt->td_qpec) {
        td->td_qpec += td->td_qnxt->td_qpec;
        td->td_qnxt->td_qpec = 0;
    }
    NutExitCritical();
}

/*!
 * \brief Remove a thread from the specified queue.
 *
 * \param td   Thread to be removed from the queue.
 * \param tqpp Pointer to the queue's root pointer.
 */
void NutThreadRemoveQueue(NUTTHREADINFO * td, NUTTHREADINFO * volatile *tqpp)
{
    NUTTHREADINFO *tqp;

    NutEnterCritical();
    tqp = *tqpp;
    NutExitCritical();

    if (tqp != SIGNALED) {
        while (tqp) {
            if (tqp == td) {
                NutEnterCritical();
                *tqpp = td->td_qnxt;
                /* Pass any pending event post count on to the successor. */
                if (td->td_qpec) {
                    if (td->td_qnxt) {
                        td->td_qnxt->td_qpec = td->td_qpec;
                    }
                    td->td_qpec = 0;
                }
                NutExitCritical();

                td->td_qnxt = 0;
                td->td_queue = 0;
                break;
            }
            tqpp = &tqp->td_qnxt;
            tqp = tqp->td_qnxt;
        }
    }
}

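/*
 * Usage sketch: the two queue primitives above are kernel internals.
 * A simplified event wait, roughly following what the Nut/OS event
 * module does (timeouts and the SIGNALED shortcut are omitted, and
 * WaitOnQueue is a made-up name), could look like this:
 *
 *  void WaitOnQueue(HANDLE *qhp)
 *  {
 *      NutThreadRemoveQueue(runningThread, (NUTTHREADINFO **) &runQueue);
 *      NutThreadAddPriQueue(runningThread, (NUTTHREADINFO **) qhp);
 *      runningThread->td_state = TDS_SLEEP;
 *      NutThreadResume();      // run the next ready thread until posted
 *  }
 */
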
/*!
 * \brief Resume the next thread that is ready to run.
 *
 * Delivers deferred event posts, processes elapsed timers and, if the
 * currently running thread is no longer on top of the run queue,
 * switches the context.
 */
void NutThreadResume(void)
{
    NUTTHREADINFO *td;
    NUTTHREADINFO **qhp;
    NUTTHREADINFO *tqp;
    unsigned int cnt;

    /*
     * Walk the list of all threads and deliver event posts that were
     * counted while the thread was waiting on a queue.
     */
    td = nutThreadList;
    while (td) {
        NutEnterCritical();
        cnt = td->td_qpec;
        NutExitCritical();
        if (cnt) {
            qhp = (NUTTHREADINFO **)(td->td_queue);
            NutEnterCritical();
            td->td_qpec--;
            tqp = *qhp;
            NutExitCritical();
            if (tqp != SIGNALED) {
                NutEventPostAsync((HANDLE *)qhp);
            }
        }
        td = td->td_next;
    }

    /* Process all elapsed timers, which may make more threads ready to run. */
    NutTimerProcessElapsed();

    /* Switch the context if another thread took over the top of the run queue. */
    if (runningThread != runQueue) {
#ifdef NUTTRACER
        TRACE_ADD_ITEM(TRACE_TAG_THREAD_YIELD,(int)runningThread);
#endif

        if (runningThread->td_state == TDS_RUNNING) {
            runningThread->td_state = TDS_READY;
        }
        NutEnterCritical();
        NutThreadSwitch();
        NutExitCritical();
    }
}

/*!
 * \brief Wake up a thread waiting for a timer.
 *
 * Used as a timer callback: clears the thread's timer reference and
 * puts the thread back into the queue of ready-to-run threads.
 *
 * \param timer Handle of the elapsed timer.
 * \param th    Handle of the thread to wake up.
 */
void NutThreadWake(HANDLE timer, HANDLE th)
{
    NUTASSERT(th != NULL);

    ((NUTTHREADINFO *) th)->td_timer = 0;
    ((NUTTHREADINFO *) th)->td_state = TDS_READY;
    NutThreadAddPriQueue(th, (NUTTHREADINFO **) &runQueue);
}

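/*
 * Usage sketch: NutThreadWake() is registered as a one-shot timer
 * callback when a thread goes to sleep. Roughly, and with error
 * handling omitted, NutSleep() parks the caller like this:
 *
 *  NutThreadRemoveQueue(runningThread, (NUTTHREADINFO **) &runQueue);
 *  runningThread->td_state = TDS_SLEEP;
 *  runningThread->td_timer = NutTimerStart(ms, NutThreadWake,
 *                                          runningThread, TM_ONESHOT);
 *  NutThreadResume();      // run other threads until the timer elapses
 */
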
/*!
 * \brief Give up the CPU.
 *
 * If another thread of equal or higher priority is ready to run, the
 * calling thread is placed behind it in the run queue and a context
 * switch is performed. Since Nut/OS schedules cooperatively, CPU bound
 * threads should call this function now and then.
 */
void NutThreadYield(void)
{
#if defined(__linux__) || defined(__APPLE__) || defined(__CYGWIN__)
    NutEnterCritical();
    NutUnixThreadYieldHook();
    NutExitCritical();
#endif

    /*
     * Re-insert the running thread into the run queue, so that other
     * ready threads of the same priority move in front of it.
     */
    if (runningThread->td_qnxt) {
        NutThreadRemoveQueue(runningThread, (NUTTHREADINFO **) &runQueue);
        NutThreadAddPriQueue(runningThread, (NUTTHREADINFO **) &runQueue);
    }

    /* Continue with the thread on top of the run queue. */
    NutThreadResume();
}

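/*
 * Usage sketch: a CPU bound worker thread yields once per iteration so
 * that other threads of the same priority get a chance to run.
 * ProcessChunk() is a made-up application routine.
 *
 *  THREAD(Worker, arg)
 *  {
 *      for (;;) {
 *          ProcessChunk();
 *          NutThreadYield();
 *      }
 *  }
 */
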
/*!
 * \brief Set the priority of the currently running thread.
 *
 * Lower numerical values mean higher priority. Setting the priority to
 * 255 kills the calling thread. If another thread of equal or higher
 * priority is ready to run, a context switch is performed.
 *
 * \param level New priority level, 0 (highest) to 254. A value of 255
 *              terminates the thread.
 *
 * \return The previous priority.
 */
uint8_t NutThreadSetPriority(uint8_t level)
{
    uint8_t last = runningThread->td_priority;

    /*
     * Remove the thread from the run queue and re-insert it with the
     * new priority, if that priority is below 255. Priority 255 kills
     * the thread.
     */
    NutThreadRemoveQueue(runningThread, &runQueue);
    runningThread->td_priority = level;
    if (level < 255) {
        NutThreadAddPriQueue(runningThread, (NUTTHREADINFO **) &runQueue);
    } else {
        NutThreadKill();
    }

    /*
     * If we are still on top of the run queue, simply keep running.
     * Otherwise switch to the new top thread.
     */
    if (runningThread == runQueue) {
        runningThread->td_state = TDS_RUNNING;
    } else {
        runningThread->td_state = TDS_READY;
#ifdef NUTTRACER
        TRACE_ADD_ITEM(TRACE_TAG_THREAD_SETPRIO,(int)runningThread);
#endif

        NutEnterCritical();
        NutThreadSwitch();
        NutExitCritical();
    }

    return last;
}

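/*
 * Usage sketch: temporarily raising the caller's priority around a time
 * critical section and restoring it afterwards. Newly created threads
 * start at priority 64; DoTimeCriticalWork() is a made-up routine.
 *
 *  uint8_t old = NutThreadSetPriority(32);
 *  DoTimeCriticalWork();
 *  NutThreadSetPriority(old);
 */
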
/*!
 * \brief Terminate the currently running thread by setting its priority to 255.
 */
void NutThreadExit(void)
{
    NutThreadSetPriority(255);
}

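/*
 * Usage sketch: a thread entry function must never simply return; when
 * its work is done it has to call NutThreadExit(). DoTheJob() is a
 * made-up application routine.
 *
 *  THREAD(OneShotJob, arg)
 *  {
 *      DoTheJob(arg);
 *      NutThreadExit();
 *      for (;;);           // never reached
 *  }
 */
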
/*!
 * \brief Release the memory of the last killed thread.
 */
void NutThreadDestroy(void)
{
    if (killedThread) {
        NutStackFree(killedThread->td_memory);
        killedThread = 0;
    }
}

/*!
 * \brief Kill the currently running thread.
 *
 * Removes the running thread from the list of all threads and marks it
 * for destruction. Its memory is released later by NutThreadDestroy().
 */
void NutThreadKill(void)
{
    NUTTHREADINFO *pCur = nutThreadList;
    NUTTHREADINFO **pp = (NUTTHREADINFO **) &nutThreadList;

    /* Free the memory of the previously killed thread, if any. */
    NutThreadDestroy();

    /* Unlink the running thread from the list of all threads. */
    while (pCur) {
        if (pCur == runningThread) {
            *pp = pCur->td_next;
            break;
        }
        pp = (NUTTHREADINFO **) &pCur->td_next;
        pCur = pCur->td_next;
    }

    /* Mark the running thread for destruction. */
    killedThread = runningThread;
}

/*!
 * \brief Find a thread by its name.
 *
 * \param name Name of the thread to look up, or NULL to get the
 *             currently running thread.
 *
 * \return Handle of the thread, or NULL if no thread with this name exists.
 */
HANDLE GetThreadByName(char * name)
{
    NUTTHREADINFO *tdp;

    if (name) {
        for (tdp = nutThreadList; tdp; tdp = tdp->td_next) {
            if (strcmp(tdp->td_name, name) == 0)
                return tdp;
        }
    } else {
        return runningThread;
    }
    return NULL;
}

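/*
 * Usage sketch: querying another thread before inspecting it. "idle" is
 * the name the system's idle thread is created with; passing NULL
 * returns the caller's own handle.
 *
 *  NUTTHREADINFO *idle = (NUTTHREADINFO *) GetThreadByName("idle");
 *  NUTTHREADINFO *self = (NUTTHREADINFO *) GetThreadByName(NULL);
 */
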
#if defined(NUTDEBUG_CHECK_STACKMIN) || defined(NUTDEBUG_CHECK_STACK)

/*
 * Count the bytes at the low end of a thread's stack area that still
 * contain the DEADBEEF fill pattern, i.e. have never been used.
 */
static size_t StackAvail(NUTTHREADINFO *td)
{
    uint32_t *sp = (uint32_t *)td->td_memory;

    while(*sp++ == DEADBEEF);

    return (size_t)((uintptr_t)sp - (uintptr_t)td->td_memory);
}

/*!
 * \brief Return the number of never used stack bytes of a thread.
 *
 * \param name Name of the thread, or NULL for the currently running thread.
 *
 * \return Number of unused bytes, or 0 if the thread was not found.
 */
size_t NutThreadStackAvailable(char *name)
{
    NUTTHREADINFO *tdp = (NUTTHREADINFO *)GetThreadByName(name);

    return tdp ? StackAvail(tdp) : 0;
}

/*!
 * \brief Check all threads for a minimum amount of unused stack space.
 *
 * \param minsiz Required minimum number of unused stack bytes.
 *
 * \return Pointer to the first thread that falls below the limit, or
 *         NULL if all threads are fine.
 */
NUTTHREADINFO *NutThreadStackCheck(size_t minsiz)
{
    NUTTHREADINFO *tdp;

    for (tdp = nutThreadList; tdp; tdp = tdp->td_next) {
        if (StackAvail(tdp) < minsiz) {
            break;
        }
    }
    return tdp;
}
#endif

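/*
 * Usage sketch: with NUTDEBUG_CHECK_STACKMIN or NUTDEBUG_CHECK_STACK
 * enabled, a low priority monitor thread may periodically verify that
 * every thread keeps a safety margin of unused stack. The 128 byte
 * margin and ReportLowStack() are made up for illustration.
 *
 *  THREAD(StackMonitor, arg)
 *  {
 *      NUTTHREADINFO *tdp;
 *
 *      for (;;) {
 *          if ((tdp = NutThreadStackCheck(128)) != NULL) {
 *              ReportLowStack(tdp->td_name);
 *          }
 *          NutSleep(1000);
 *      }
 *  }
 */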