// thbase.c
#include "thbase.h"
#include "loadcore.h"
#include "sysclib.h"
#include "sysmem.h"
#include "kerr.h"
#include "intrman.h"
#include "thsemap.h"
#include "xthbase.h"
#include "xtimrman.h"

#include "thcommon.h"

static void clock_mul(iop_sys_clock_t *dst, iop_sys_clock_t *src, u32 mul);
static void clock_div(iop_sys_clock_t *dst, iop_sys_clock_t *src, u32 d, u32 *r);

static struct thread *refer_thread(int thid, int current);

static void thread_get_run_stats(struct thread *thread, iop_thread_run_status_t *stat);
static void thread_get_status(struct thread *thread, iop_thread_info_t *info);

struct thread_context *GetThreadCtx(void)
{
    return &thctx;
}

int CreateThread(iop_thread_t *thparam)
{
    struct thread *thread;
    void *stack;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thparam->attr & ~(TH_ASM | TH_C | TH_UMODE | TH_NO_FILLSTACK | TH_CLEAR_STACK)) {
        return KE_ILLEGAL_ATTR;
    }

    if (thparam->priority > 126) {
        return KE_ILLEGAL_PRIORITY;
    }

    if ((u32)thparam->thread & 3) {
        return KE_ILLEGAL_ENTRY;
    }

    if (thparam->stacksize < 0x130) {
        return KE_ILLEGAL_STACK_SIZE;
    }

    CpuSuspendIntr(&state);

    thread = heap_alloc(TAG_THREAD, sizeof(*thread));
    if (!thread) {
        CpuResumeIntr(state);
        return KE_NO_MEMORY;
    }

    thparam->stacksize = ALIGN_256(thparam->stacksize);
    stack = AllocSysMemory(1, thparam->stacksize, 0);
    if (!stack) {
        heap_free(&thread->tag);
        CpuResumeIntr(state);
        return KE_NO_MEMORY;
    }

    thread->tag.id = ++thctx.thread_id;
    thread->entry = thparam->thread;
    thread->stack_size = thparam->stacksize;
    thread->stack_top = stack;
    thread->init_priority = thparam->priority;
    thread->attr = thparam->attr;
    thread->option = thparam->option;
    thread->status = THS_DORMANT;

    asm __volatile__("sw $gp, %0\n"
                     : "=m"(thread->gp)::);

    list_insert(&thctx.thread_list, &thread->thread_list);

    if ((thread->attr & TH_NO_FILLSTACK) == 0) {
        // why -0x30?
        memset(thread->stack_top, 0xff, thread->stack_size - 0x30);
    }

    CpuResumeIntr(state);

    return MAKE_HANDLE(thread);
}

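/*
 * Usage sketch (illustrative, not part of the original source): creating
 * and starting a simple thread with this API. The entry point, priority
 * and stack size are hypothetical values chosen for the example.
 *
 *   static void worker(void *arg)
 *   {
 *       // ... do work ...
 *       ExitThread();
 *   }
 *
 *   iop_thread_t param;
 *   int thid;
 *
 *   param.attr      = TH_C;
 *   param.thread    = worker;
 *   param.priority  = 40;     // must be <= 126
 *   param.stacksize = 0x800;  // rounded up to a 256-byte multiple
 *   param.option    = 0;
 *
 *   thid = CreateThread(&param);
 *   if (thid > 0)
 *       StartThread(thid, NULL);
 */
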
int DeleteThread(int thid)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    CpuSuspendIntr(&state);

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD) || thread == thctx.idle_thread) {
        CpuResumeIntr(state);
        return KE_UNKNOWN_THID;
    }

    if (thread->status != THS_DORMANT) {
        CpuResumeIntr(state);
        return KE_NOT_DORMANT;
    }

    if (thread->attr & TH_CLEAR_STACK) {
        memset(thread->stack_top, 0, thread->stack_size);
    }

    FreeSysMemory(thread->stack_top);
    list_remove(&thread->queue);
    list_remove(&thread->thread_list);
    heap_free(&thread->tag);

    CpuResumeIntr(state);

    return KE_OK;
}

int StartThread(int thid, void *arg)
{
    struct thread *thread;
    u32 reg_offset;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    CpuSuspendIntr(&state);

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        CpuResumeIntr(state);
        return KE_UNKNOWN_THID;
    }

    if (thread->status != THS_DORMANT) {
        CpuResumeIntr(state);
        return KE_NOT_DORMANT;
    }

    // zero out register state
    reg_offset = ALIGN(thread->stack_size) - RESERVED_REGCTX_SIZE;
    thread->saved_regs = thread->stack_top + reg_offset;
    memset(thread->saved_regs, 0, RESERVED_REGCTX_SIZE);

    thread->saved_regs->a0 = (u32)arg;

    return thread_init_and_start(thread, state);
}

int StartThreadArgs(int thid, int args, void *argp)
{
    struct thread *thread;
    u32 arg_offset, reg_offset;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    CpuSuspendIntr(&state);

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        CpuResumeIntr(state);
        return KE_UNKNOWN_THID;
    }

    if (thread->status != THS_DORMANT) {
        CpuResumeIntr(state);
        return KE_NOT_DORMANT;
    }

    // stash the args at the end of the stack buffer
    arg_offset = ALIGN(thread->stack_size) - ALIGN(args);
    if (args > 0 && argp) {
        memcpy(thread->stack_top + arg_offset, argp, args);
    }

    // BUG: the memset was originally done before saved_regs was set in the
    // struct, which would likely dereference NULL on a newly created thread.
    // memset(thread->saved_regs, 0, RESERVED_REGCTX_SIZE);

    reg_offset = arg_offset - RESERVED_REGCTX_SIZE;
    thread->saved_regs = thread->stack_top + reg_offset;

    memset(thread->saved_regs, 0, RESERVED_REGCTX_SIZE);

    thread->saved_regs->a0 = args;
    thread->saved_regs->a1 = (u32)thread->stack_top + arg_offset;

    return thread_init_and_start(thread, state);
}

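/*
 * Resulting stack layout for StartThreadArgs (descriptive sketch derived
 * from the code above; addresses grow upward):
 *
 *   stack_top + ALIGN(stack_size)                  -- end of stack buffer
 *   stack_top + arg_offset                         -- copy of argp (args bytes)
 *   stack_top + arg_offset - RESERVED_REGCTX_SIZE  -- zeroed register context,
 *                                                     a0 = args, a1 = &arg copy
 */
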
int ExitThread(void)
{
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);
    thctx.current_thread->status = THS_DORMANT;
    list_insert(&thctx.dormant_queue, &thctx.current_thread->queue);
    thctx.run_next = NULL;
    thread_leave(0, 0, state, 1);

    Kprintf("panic ! Thread DORMANT !\n");
    __builtin_trap();

    return KE_OK;
}

int ExitDeleteThread(void)
{
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);
    thctx.current_thread->status = THS_DORMANT;
    list_insert(&thctx.delete_queue, &thctx.current_thread->queue);
    thctx.run_next = NULL;
    thread_leave(0, 0, state, 1);

    Kprintf("panic ! Thread ExitDeleted !\n");
    __builtin_trap();

    return KE_OK;
}

int TerminateThread(int thid)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        CpuResumeIntr(state);
        return KE_UNKNOWN_THID;
    }

    if (thread->status == THS_DORMANT) {
        CpuResumeIntr(state);
        return KE_DORMANT;
    }

    if (thread->status == THS_READY) {
        readyq_remove(thread, thread->priority);
    } else {
        list_remove(&thread->queue);
        if (thread->wait_type == TSW_DELAY) {
            CancelAlarm(thread_delay_cb, thread);
        } else if (thread->wait_type >= TSW_DELAY && thread->wait_type <= TSW_FPL) {
            thread->wait_event->waiter_count--;
        }
    }

    thread->status = THS_DORMANT;
    list_insert(&thctx.dormant_queue, &thread->queue);

    CpuResumeIntr(state);
    return KE_OK;
}

int iTerminateThread(int thid)
{
    struct thread *thread;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return KE_UNKNOWN_THID;
    }

    if (thread->status == THS_DORMANT) {
        return KE_DORMANT;
    }

    if (thread == thctx.current_thread) {
        thctx.run_next = NULL;
    } else {
        if (thread->status == THS_READY) {
            readyq_remove(thread, thread->priority);
        } else {
            list_remove(&thread->queue);

            if (thread->status == THS_WAIT) {
                if (thread->wait_type == TSW_DELAY) {
                    iCancelAlarm(thread_delay_cb, thread);
                } else {
                    thread->wait_event->waiter_count--;
                }
            }
        }
    }

    thread->status = THS_DORMANT;
    list_insert(&thctx.dormant_queue, &thread->queue);

    return KE_OK;
}

int DisableDispatchThread(void)
{
    return KE_ERROR;
}

int EnableDispatchThread(void)
{
    return KE_ERROR;
}

int ChangeThreadPriority(int thid, int priority)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    if (thid == 0) {
        thread = thctx.current_thread;
    } else {
        thread = HANDLE_PTR(thid);
        if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
            CpuResumeIntr(state);
            return KE_UNKNOWN_THID;
        }

        if (thread->status == THS_DORMANT) {
            CpuResumeIntr(state);
            return KE_DORMANT;
        }
    }

    if (priority) {
        if (priority - 1 >= 126) {
            CpuResumeIntr(state);
            return KE_ILLEGAL_PRIORITY;
        }
    } else {
        priority = thctx.current_thread->priority;
    }

    if (thread == thctx.current_thread) {
        if (priority >= readyq_highest()) {
            thread->status = THS_READY;
            thread->priority = priority;
            readyq_insert_back(thread);
            thctx.run_next = NULL;

            return thread_leave(KE_OK, 0, state, 0);
        }

        thread->priority = priority;
    } else {
        if (thread->status == THS_READY) {
            readyq_remove(thread, thread->priority);
            thread->priority = priority;

            return thread_start(thread, state);
        }

        // BUG: there was no check for TSW_DELAY here
        // in which case there would be a usec value in the wait_event union.
        //
        // Added check to prevent dereferencing garbage.

        if (thread->status == THS_WAIT && thread->wait_type != TSW_DELAY) {
            if (thread->wait_event->attr & SA_THPRI) {
                waitlist_insert(thread, thread->wait_event, priority);
            }
        }
    }

    CpuResumeIntr(state);
    return KE_OK;
}

int iChangeThreadPriority(int thid, int priority)
{
    struct thread *thread;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return KE_UNKNOWN_THID;
    }

    if (thread->status == THS_DORMANT) {
        return KE_DORMANT;
    }

    if (priority == 0) {
        priority = thctx.current_thread->priority;
    }

    if (priority - 1 >= 126) {
        return KE_ILLEGAL_PRIORITY;
    }

    if (thread == thctx.current_thread) {
        thread->status = THS_READY;
    } else {
        if (thread->status != THS_READY) {
            thread->priority = priority;
            if (thread->status != THS_WAIT || thread->wait_type == TSW_DELAY) {
                return KE_OK;
            }

            if ((thread->wait_event->attr & SA_THPRI) == 0) {
                return KE_OK;
            }

            waitlist_insert(thread, thread->wait_event, priority);
        }
    }

    thread->priority = priority;
    readyq_insert_back(thread);
    thctx.run_next = NULL;

    return KE_OK;
}

int RotateThreadReadyQueue(int priority)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (priority >= 127) {
        return KE_ILLEGAL_PRIORITY;
    }

    CpuSuspendIntr(&state);

    thread = thctx.current_thread;

    if (priority == 0) {
        priority = thread->priority;
    }

    if (list_empty(&thctx.ready_queue[priority])) {
        CpuResumeIntr(state);
        return KE_OK;
    }

    if (priority != thread->priority) {
        thread = list_first_entry(&thctx.ready_queue[priority], struct thread, queue);
        list_remove(&thread->queue);
        list_insert(&thctx.ready_queue[priority], &thread->queue);

        CpuResumeIntr(state);
        return KE_OK;
    }

    thread->status = THS_READY;
    readyq_insert_back(thread);
    thctx.run_next = NULL;
    return thread_leave(KE_OK, 0, state, 0);
}

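/*
 * Usage sketch (illustrative): a cooperative yield. Rotating the ready
 * queue at the caller's own priority moves the current thread behind any
 * other ready thread of equal priority.
 *
 *   RotateThreadReadyQueue(0);  // 0 selects the caller's priority
 */
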
int iRotateThreadReadyQueue(int priority)
{
    struct thread *thread;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    thread = thctx.current_thread;

    if (priority) {
        if (priority >= 126) {
            return KE_ILLEGAL_PRIORITY;
        }
    } else {
        priority = readyq_highest();
        if (thread->priority < priority) {
            priority = thread->priority;
        }
    }

    if (list_empty(&thctx.ready_queue[priority])) {
        return KE_OK;
    }

    if (priority == thread->priority) {
        thread->status = THS_READY;
        readyq_insert_back(thread);
        thctx.run_next = NULL;
    } else {
        thread = list_first_entry(&thctx.ready_queue[priority], struct thread, queue);
        list_remove(&thread->queue);
        list_insert(&thctx.ready_queue[priority], &thread->queue);
    }

    return KE_OK;
}

int ReleaseWaitThread(int thid)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    CpuSuspendIntr(&state);

    thread = HANDLE_PTR(thid);
    if (thread == thctx.current_thread) {
        CpuResumeIntr(state);
        return KE_ILLEGAL_THID;
    }

    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        CpuResumeIntr(state);
        return KE_UNKNOWN_THID;
    }

    if (thread->status != THS_WAIT) {
        CpuResumeIntr(state);
        return KE_NOT_WAIT;
    }

    thread->saved_regs->v0 = KE_RELEASE_WAIT;
    list_remove(&thread->queue);
    thread->status = THS_READY;

    if (thread->wait_type == TSW_DELAY) {
        CancelAlarm(thread_delay_cb, thread);
    } else {
        thread->wait_event->waiter_count--;
    }

    return thread_start(thread, state);
}

int iReleaseWaitThread(int thid)
{
    struct thread *thread;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    thread = HANDLE_PTR(thid);

    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return KE_UNKNOWN_THID;
    }

    if (thread->status != THS_WAIT) {
        return KE_NOT_WAIT;
    }

    thread->saved_regs->v0 = KE_RELEASE_WAIT;
    list_remove(&thread->queue);
    thread->status = THS_READY;

    if (thread->wait_type == TSW_DELAY) {
        iCancelAlarm(thread_delay_cb, thread);
    } else {
        thread->wait_event->waiter_count--;
    }

    readyq_insert_back(thread);
    thctx.run_next = NULL;

    return KE_OK;
}

int GetThreadId(void)
{
    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    return MAKE_HANDLE(thctx.current_thread);
}

int CheckThreadStack(void)
{
    if (QueryIntrContext()) {
        return KE_OK;
    }

    return check_thread_stack();
}

int ReferThreadStatus(int thid, iop_thread_info_t *info)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    thread = refer_thread(thid, 1);
    if ((int)thread < 0) {
        CpuResumeIntr(state);
        return (int)thread;
    }

    thread_get_status(thread, info);

    CpuResumeIntr(state);

    return KE_OK;
}

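/*
 * Usage sketch (illustrative): inspecting a thread. Passing 0 as the
 * thread id refers to the calling thread.
 *
 *   iop_thread_info_t info;
 *
 *   if (ReferThreadStatus(0, &info) == KE_OK)
 *       Kprintf("status=%d prio=%d stack=%d bytes\n",
 *               info.status, info.currentPriority, info.stackSize);
 */
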
int iReferThreadStatus(int thid, iop_thread_info_t *info)
{
    struct thread *thread;

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    thread = HANDLE_PTR(thid);

    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return KE_UNKNOWN_THID;
    }

    thread_get_status(thread, info);

    return KE_OK;
}

int SleepThread(void)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (CpuSuspendIntr(&state) == KE_CPUDI && (thctx.debug_flags & 8)) {
        Kprintf("WARNING: SleepThread KE_CAN_NOT_WAIT\n");
    }
    check_thread_stack();

    thread = thctx.current_thread;

    if (thread->wakeup_count != 0) {
        thread->wakeup_count--;
        CpuResumeIntr(state);
        return KE_OK;
    }

    thread->status = THS_WAIT;
    thread->wait_type = TSW_SLEEP;
    thread->wait_event = NULL;
    // thread->wait_return = 0;
    thctx.run_next = NULL;

    list_insert(&thctx.sleep_queue, &thread->queue);

    return thread_leave(KE_OK, 0, state, 1);
}

int WakeupThread(int thid)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    CpuSuspendIntr(&state);

    thread = HANDLE_PTR(thid);
    if (thread == thctx.current_thread) {
        // BUG: originally returned here without re-enabling interrupts
        CpuResumeIntr(state);
        return KE_ILLEGAL_THID;
    }

    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        // BUG: originally returned here without re-enabling interrupts
        CpuResumeIntr(state);
        return KE_UNKNOWN_THID;
    }

    if (thread->status == THS_DORMANT) {
        CpuResumeIntr(state);
        return KE_DORMANT;
    }

    if (thread->status == THS_WAIT && thread->wait_type == TSW_SLEEP) {
        list_remove(&thread->queue);
        thread->status = THS_READY;

        return thread_start(thread, state);
    }

    thread->wakeup_count++;

    CpuResumeIntr(state);

    return KE_OK;
}

int iWakeupThread(int thid)
{
    struct thread *thread;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return KE_UNKNOWN_THID;
    }

    if (thread->status == THS_DORMANT) {
        return KE_DORMANT;
    }

    if (thread->status == THS_WAIT && thread->wait_type == TSW_SLEEP) {
        list_remove(&thread->queue);
        thread->status = THS_READY;
        readyq_insert_back(thread);
        thctx.run_next = NULL;
    } else {
        thread->wakeup_count++;
    }

    return KE_OK;
}

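/*
 * Usage sketch (illustrative): a sleep/wakeup handshake. The wakeup_count
 * logic above makes wakeups sticky, so a wakeup that arrives before the
 * SleepThread call is not lost. worker_thid is a hypothetical handle saved
 * at thread creation time.
 *
 *   // worker thread
 *   while (1) {
 *       SleepThread();           // blocks unless a wakeup is pending
 *       handle_work();           // hypothetical
 *   }
 *
 *   // from an interrupt handler
 *   iWakeupThread(worker_thid);
 */
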
int CancelWakeupThread(int thid)
{
    struct thread *thread;
    int state, wakeup_count;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);
    if (thid) {
        thread = HANDLE_PTR(thid);
        if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
            CpuResumeIntr(state);
            return KE_UNKNOWN_THID;
        }
    } else {
        thread = thctx.current_thread;
    }

    wakeup_count = thread->wakeup_count;
    thread->wakeup_count = 0;

    CpuResumeIntr(state);
    return wakeup_count;
}

int iCancelWakeupThread(int thid)
{
    struct thread *thread;
    int wakeup_count;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (thid == 0) {
        return KE_ILLEGAL_THID;
    }

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return KE_UNKNOWN_THID;
    }

    wakeup_count = thread->wakeup_count;
    thread->wakeup_count = 0;

    return wakeup_count;
}

int SuspendThread(int thid)
{
    return KE_ERROR;
}

int iSuspendThread(int thid)
{
    return KE_ERROR;
}

int ResumeThread(int thid)
{
    return KE_ERROR;
}

int iResumeThread(int thid)
{
    return KE_ERROR;
}

int DelayThread(int usec)
{
    iop_sys_clock_t clock;
    struct thread *thread;
    int ret, state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    USec2SysClock(usec, &clock);
    if (CpuSuspendIntr(&state) == KE_CPUDI && (thctx.debug_flags & 8)) {
        Kprintf("WARNING: DelayThread KE_CAN_NOT_WAIT\n");
    }
    check_thread_stack();

    thread = thctx.current_thread;

    ret = SetAlarm(&clock, thread_delay_cb, thread);
    if (ret != KE_OK) {
        CpuResumeIntr(state);
        return ret;
    }

    thread->status = THS_WAIT;
    thread->wait_type = TSW_DELAY;
    thread->wait_usecs = usec;
    // thread->wait_return = 0;

    thctx.run_next = NULL;
    list_insert(&thctx.delay_queue, &thread->queue);

    return thread_leave(KE_OK, 0, state, 1);
}

int GetSystemTime(iop_sys_clock_t *sys_clock)
{
    return CpuInvokeInKmode(read_sys_time, sys_clock);
}

int SetAlarm(iop_sys_clock_t *sys_clock, unsigned int (*alarm_cb)(void *), void *arg)
{
    iop_sys_clock_t systime;
    struct alarm *alarm;
    int state;
    u64 time;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    if (!list_empty(&thctx.alarm)) {
        list_for_each (alarm, &thctx.alarm, alarm_list) {
            if (alarm->cb == alarm_cb && alarm->userptr == arg) {
                CpuResumeIntr(state);
                return KE_FOUND_HANDLER;
            }
        }
    }

    alarm = alarm_alloc();
    if (!alarm) {
        CpuResumeIntr(state);
        return KE_NO_MEMORY;
    }

    if (sys_clock->hi == 0 && sys_clock->lo < thctx.min_wait) {
        sys_clock->lo = thctx.min_wait;
    }

    GetSystemTime(&systime);
    // as_u64(systime.hi, systime.lo); // ???

    alarm->target = add64(sys_clock->hi, sys_clock->lo, systime.hi, systime.lo);
    alarm->cb = alarm_cb;
    alarm->userptr = arg;
    thctx.alarm_count++;
    alarm_insert(&thctx.alarm, alarm);

    GetSystemTime(&systime);
    time = as_u64(systime.hi, systime.lo);
    update_timer_compare(thctx.timer_id, time, &thctx.alarm);

    CpuResumeIntr(state);

    return KE_OK;
}

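/*
 * Usage sketch (illustrative): waking a thread from an alarm. Alarm
 * callbacks run in interrupt context, so only the i-prefixed services may
 * be called from them. The return value is assumed here to mean "do not
 * reschedule" when 0; see the alarm dispatch code for the exact contract.
 *
 *   static unsigned int alarm_cb(void *arg)
 *   {
 *       iWakeupThread((int)arg);
 *       return 0;
 *   }
 *
 *   iop_sys_clock_t clock;
 *   USec2SysClock(10000, &clock);              // 10 ms from now
 *   SetAlarm(&clock, alarm_cb, (void *)thid);  // thid: hypothetical handle
 */
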
int iSetAlarm(iop_sys_clock_t *sys_clock, unsigned int (*alarm_cb)(void *), void *arg)
{
    struct alarm *alarm;
    iop_sys_clock_t systime;
    u64 time;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    list_for_each (alarm, &thctx.alarm, alarm_list) {
        if (alarm->cb == alarm_cb && alarm->userptr == arg) {
            return KE_FOUND_HANDLER;
        }
    }

    alarm = alarm_alloc();
    if (!alarm) {
        return KE_NO_MEMORY;
    }

    if (sys_clock->hi == 0 && sys_clock->lo < thctx.min_wait) {
        sys_clock->lo = thctx.min_wait;
    }

    read_sys_time(&systime);
    // as_u64(systime.hi, systime.lo);
    alarm->target = add64(sys_clock->hi, sys_clock->lo, systime.hi, systime.lo);
    alarm->cb = alarm_cb;
    alarm->userptr = arg;
    thctx.alarm_count++;
    alarm_insert(&thctx.alarm, alarm);
    read_sys_time(&systime);
    time = as_u64(systime.hi, systime.lo);
    update_timer_compare(thctx.timer_id, time, &thctx.alarm);

    return KE_OK;
}

int CancelAlarm(unsigned int (*alarm_cb)(void *), void *arg)
{
    struct alarm *alarm;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    if (list_empty(&thctx.alarm)) {
        CpuResumeIntr(state);
        return KE_NOTFOUND_HANDLER;
    }

    list_for_each (alarm, &thctx.alarm, alarm_list) {
        if (alarm->cb == alarm_cb && alarm->userptr == arg) {
            list_remove(&alarm->alarm_list);
            alarm_free(alarm);
            thctx.alarm_count--;

            CpuResumeIntr(state);
            return KE_OK;
        }
    }

    CpuResumeIntr(state);

    return KE_NOTFOUND_HANDLER;
}

int iCancelAlarm(unsigned int (*alarm_cb)(void *), void *arg)
{
    struct alarm *alarm;

    if (!QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    if (list_empty(&thctx.alarm)) {
        return KE_NOTFOUND_HANDLER;
    }

    list_for_each (alarm, &thctx.alarm, alarm_list) {
        if (alarm->cb == alarm_cb && alarm->userptr == arg) {
            list_remove(&alarm->alarm_list);
            alarm_free(alarm);
            thctx.alarm_count--;

            return KE_OK;
        }
    }

    return KE_NOTFOUND_HANDLER;
}

void USec2SysClock(u32 usec, iop_sys_clock_t *sys_clock)
{
    sys_clock->hi = 0;
    sys_clock->lo = usec;
    clock_mul(sys_clock, sys_clock, thctx.unk_clock_mult);
    clock_div(sys_clock, sys_clock, thctx.unk_clock_div, NULL);
}

void SysClock2USec(iop_sys_clock_t *sys_clock, u32 *sec, u32 *usec)
{
    iop_sys_clock_t clock;
    clock_mul(&clock, sys_clock, thctx.unk_clock_div);
    clock_div(&clock, &clock, thctx.unk_clock_mult, NULL);
    clock_div(&clock, &clock, 1000000, usec);
    *sec = clock.lo;
}

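/*
 * Usage sketch (illustrative): round-tripping a duration through the
 * system clock representation.
 *
 *   iop_sys_clock_t clock;
 *   u32 sec, usec;
 *
 *   USec2SysClock(1500000, &clock);      // 1.5 s, in timer ticks
 *   SysClock2USec(&clock, &sec, &usec);  // sec == 1, usec == 500000,
 *                                        // up to rounding in the tick
 *                                        // conversion
 */
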
int GetSystemStatusFlag(void)
{
    return thctx.sytem_status_flag;
}

int GetThreadCurrentPriority(void)
{
    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    return thctx.current_thread->priority;
}

unsigned int GetSystemTimeLow(void)
{
    return GetTimerCounter(thctx.timer_id);
}

int ReferSystemStatus(iop_sys_status_t *info, size_t size)
{
    int state, ret;
    if (size < sizeof(*info)) {
        return KE_ERROR;
    }

    memset(info, 0, size);

    ret = CpuSuspendIntr(&state);

    if (QueryIntrContext()) {
        info->status = TSS_NOTHREAD;
    } else if (ret == KE_CPUDI) {
        info->status = TSS_DISABLEINTR;
    } else {
        info->status = TSS_THREAD;
    }

    info->systemLowTimerWidth = 32;
    info->idleClocks.hi = thctx.idle_thread->run_clocks_hi;
    info->idleClocks.lo = thctx.idle_thread->run_clocks_lo;
    info->threadSwitchCount = thctx.thread_switch_count;
    info->comesOutOfIdleCount = thctx.idle_thread->irq_preemption_count;

    CpuResumeIntr(state);

    return KE_OK;
}

int ReferThreadRunStatus(int thid, iop_thread_run_status_t *stat, size_t size)
{
    struct thread *thread;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    thread = refer_thread(thid, 1);
    if ((int)thread <= 0) {
        CpuResumeIntr(state);
        return (int)thread;
    }

    if (size < sizeof(*stat)) {
        CpuResumeIntr(state);
        return KE_ILLEGAL_SIZE;
    }

    memset(stat, 0, size);
    thread_get_run_stats(thread, stat);

    CpuResumeIntr(state);
    return KE_OK;
}

/*
 * Returns the low-water mark of free stack space, i.e. the number of bytes
 * at the far end of the stack that still hold the 0xff fill pattern. Only
 * meaningful if stack filling was not disabled with TH_NO_FILLSTACK.
 */
int GetThreadStackFreeSize(int thid)
{
    struct thread *thread;
    u32 stack_size, i;
    u32 *stack;
    int state;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    thread = refer_thread(thid, 1);
    if ((int)thread < 0) {
        CpuResumeIntr(state);
        return (int)thread;
    }

    stack = thread->stack_top;
    stack_size = thread->stack_size / 4;
    CpuResumeIntr(state);

    for (i = 0; i < stack_size; i++) {
        if (stack[i] != 0xffffffff) {
            return i * 4;
        }
    }

    return i * 4;
}

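/*
 * Usage sketch (illustrative): checking stack headroom during development.
 * The 0x100 threshold is an arbitrary example value.
 *
 *   int free_bytes = GetThreadStackFreeSize(thid);
 *   if (free_bytes >= 0 && free_bytes < 0x100)
 *       Kprintf("thread %x nearly out of stack (%d bytes left)\n",
 *               thid, free_bytes);
 */
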
int GetThreadmanIdList(int type, int *readbuf, int readbufsize, int *objectcount)
{
    int state, write_count, obj_count;

    if (QueryIntrContext()) {
        return KE_ILLEGAL_CONTEXT;
    }

    CpuSuspendIntr(&state);

    write_count = 0;
    obj_count = 0;

    switch (type) {
        case TMID_Thread: {
            struct thread *thread;
            list_for_each (thread, &thctx.thread_list, thread_list) {
                if (thread != thctx.idle_thread) {
                    if (write_count < readbufsize) {
                        *readbuf++ = MAKE_HANDLE(thread);
                        write_count++;
                    }
                    obj_count++;
                }
            }
        } break;
        case TMID_Semaphore: {
            struct semaphore *sema;
            list_for_each (sema, &thctx.semaphore, sema_list) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(sema);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_EventFlag: {
            struct event_flag *evf;
            list_for_each (evf, &thctx.event_flag, evf_list) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(evf);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_Mbox: {
            struct mbox *mbx;
            list_for_each (mbx, &thctx.mbox, mbox_list) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(mbx);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_Vpl: {
            struct vpool *vpl;
            list_for_each (vpl, &thctx.vpool, vpl_list) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(vpl);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_Fpl: {
            struct fpool *fpl;
            list_for_each (fpl, &thctx.fpool, fpl_list) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(fpl);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_SleepThread: {
            struct thread *thread;
            list_for_each (thread, &thctx.sleep_queue, queue) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(thread);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_DelayThread: {
            struct thread *thread;
            list_for_each (thread, &thctx.delay_queue, queue) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(thread);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        case TMID_DormantThread: {
            struct thread *thread;
            list_for_each (thread, &thctx.dormant_queue, queue) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(thread);
                    write_count++;
                }
                obj_count++;
            }
        } break;
        default: {
            struct heaptag *tag = HANDLE_PTR(type);
            struct thread *thread;
            struct event *event;

            if (type < 0 || tag->tag < TAG_SEMA || tag->tag > TAG_FPL || tag->id != HANDLE_ID(type)) {
                CpuResumeIntr(state);
                return KE_ILLEGAL_TYPE;
            }

            switch (tag->tag) {
                case TAG_SEMA:
                    event = &((struct semaphore *)tag)->event;
                    break;
                case TAG_EVF:
                    event = &((struct event_flag *)tag)->event;
                    break;
                case TAG_MBX:
                    event = &((struct mbox *)tag)->event;
                    break;
                case TAG_VPL:
                    event = &((struct vpool *)tag)->event;
                    break;
                case TAG_FPL:
                    event = &((struct fpool *)tag)->event;
                    break;
            }

            list_for_each (thread, &event->waiters, queue) {
                if (write_count < readbufsize) {
                    *readbuf++ = MAKE_HANDLE(thread);
                    write_count++;
                }
                obj_count++;
            }
        }
    }

    CpuResumeIntr(state);

    if (objectcount) {
        *objectcount = obj_count;
    }

    return write_count;
}

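/*
 * Usage sketch (illustrative): listing every thread known to the thread
 * manager. When more objects exist than fit in the buffer, the full count
 * is still reported through objectcount.
 *
 *   int ids[32];
 *   int total, n, i;
 *
 *   n = GetThreadmanIdList(TMID_Thread, ids, 32, &total);
 *   for (i = 0; i < n; i++)
 *       Kprintf("thread handle %x\n", ids[i]);
 */
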
static void clock_mul(iop_sys_clock_t *dst, iop_sys_clock_t *src, u32 mul)
{
    u64 res;
    res = (u64)src->hi << 32 | src->lo;
    res *= mul;
    if (dst) {
        dst->hi = res >> 32;
        dst->lo = res;
    }
}

/*
 * Long division of a 64-bit clock value by a 32-bit divisor. The low word
 * is processed one byte at a time so the kernel does not depend on the
 * 64-bit division helpers from libgcc. Note that (rem << 8) assumes the
 * divisor stays below 2^24, which holds for the divisors used here.
 */
static void clock_div(iop_sys_clock_t *dst, iop_sys_clock_t *src, u32 d, u32 *r)
{
    u32 quot_hi, quot_lo, rem, lo;
    int i;

    quot_hi = 0;
    lo = src->lo;
    quot_lo = src->hi / d;
    rem = src->hi % d;
    for (i = 0; i < 4; i++) {
        u32 part = (rem << 8) | (lo >> 24);
        lo <<= 8;
        quot_hi = (quot_hi << 8) | (quot_lo >> 24);
        quot_lo = (quot_lo << 8) + part / d;
        rem = part % d;
    }
    if (dst) {
        dst->hi = quot_hi;
        dst->lo = quot_lo;
    }
    if (r) {
        *r = rem;
    }
}

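/*
 * Sanity sketch (illustrative): on a host with native 64-bit division the
 * result of clock_div can be cross-checked like this; the input values are
 * arbitrary examples.
 *
 *   iop_sys_clock_t src = { .lo = 0x89abcdef, .hi = 0x01234567 };
 *   iop_sys_clock_t q;
 *   u32 r;
 *
 *   clock_div(&q, &src, 48000, &r);
 *   u64 x = (u64)src.hi << 32 | src.lo;
 *   // ((u64)q.hi << 32 | q.lo) == x / 48000 and r == x % 48000
 */
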
static struct thread *refer_thread(int thid, int current)
{
    struct thread *thread;

    if (!thid && current) {
        return thctx.current_thread;
    }

    thread = HANDLE_PTR(thid);
    if (!HANDLE_VERIFY(thid, TAG_THREAD)) {
        return (struct thread *)KE_UNKNOWN_THID;
    }

    return thread;
}

static void thread_get_status(struct thread *thread, iop_thread_info_t *info)
{
    memset(info, 0, sizeof(*info));
    info->status = thread->status;
    info->currentPriority = thread->priority;
    info->initPriority = thread->init_priority;
    info->entry = thread->entry;
    info->stack = thread->stack_top;
    info->stackSize = thread->stack_size;
    info->gpReg = (void *)thread->gp;
    info->attr = thread->attr;
    info->option = thread->option;

    if (thread->status == THS_WAIT) {
        info->waitType = thread->wait_type;
        if (thread->wait_type == TSW_DELAY) {
            info->waitId = thread->wait_usecs;
        } else {
            switch (thread->wait_type) {
                case TSW_SEMA:
                    info->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct semaphore, event));
                    break;
                case TSW_EVENTFLAG:
                    info->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct event_flag, event));
                    break;
                case TSW_MBX:
                    info->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct mbox, event));
                    break;
                case TSW_FPL:
                    info->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct fpool, event));
                    break;
                case TSW_VPL:
                    info->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct vpool, event));
                    break;
                default:
                    // shouldn't happen
                    break;
            }
        }
    }

    info->wakeupCount = thread->wakeup_count;
    if (thread->status == THS_DORMANT || thread->status == THS_RUN) {
        info->regContext = 0;
    } else {
        info->regContext = (long *)thread->saved_regs;
    }
}

static void thread_get_run_stats(struct thread *thread, iop_thread_run_status_t *stat)
{
    stat->status = thread->status;
    stat->currentPriority = thread->priority;

    if (thread->status == THS_WAIT) {
        stat->waitType = thread->wait_type;
        if (thread->wait_type == TSW_DELAY) {
            stat->waitId = thread->wait_usecs;
        } else {
            switch (thread->wait_type) {
                case TSW_SEMA:
                    stat->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct semaphore, event));
                    break;
                case TSW_EVENTFLAG:
                    stat->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct event_flag, event));
                    break;
                case TSW_MBX:
                    stat->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct mbox, event));
                    break;
                case TSW_FPL:
                    stat->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct fpool, event));
                    break;
                case TSW_VPL:
                    stat->waitId = MAKE_HANDLE(container_of(thread->wait_event, struct vpool, event));
                    break;
                default:
                    // shouldn't happen
                    break;
            }
        }
    }

    stat->wakeupCount = thread->wakeup_count;
    if (thread->status != THS_DORMANT && thread->status != THS_RUN) {
        stat->regContext = (long *)thread->saved_regs;
    }

    stat->runClocks.hi = thread->run_clocks_hi;
    stat->runClocks.lo = thread->run_clocks_lo;
    stat->intrPreemptCount = thread->irq_preemption_count;
    stat->threadPreemptCount = thread->thread_preemption_count;
    stat->releaseCount = thread->release_count;
}