11#include "irx_imports.h"
/* NOTE(review): this file is a damaged extraction of the IOP "sifman"
 * (SIF manager) IRX module -- original line numbers are fused into the
 * text and many lines are missing.  Comments describe only what is
 * visible; the code lines themselves are kept verbatim. */
/* Module identification: library "IOP_SIF_manager", version 2.5. */
20IRX_ID(
"IOP_SIF_manager", 2, 5);
/* Fragment of a completion-callback record: function + userdata pair. */
 34 void (*func)(
void *userdata);
/* Fragment of the sifman internal state: optional user DMA interrupt
 * hook, invoked from the SIF0 DMA interrupt handler further below. */
 57 void (*dma_intr_handler)(
void *userdata);
 58 void *dma_intr_handler_userdata;
/* One-shot init guards, set once the respective init path has run. */
67static u32 sif_dma2_inited = 0;
68static u32 sif_inited = 0;
/* Tail of a _mfc0() helper macro reading a COP0 register via inline asm. */
 75 __asm__ volatile("mfc0 %0, " #reg : "=r"(val)); \
79#define mfc0(reg) _mfc0(reg)
/* IRX module entry point.  Visible behaviour: checks SBUS control bit 3
 * and the SIF register at offset 0x60, then registers the sifman export
 * table via RegisterLibraryEntries on the matching paths.
 * NOTE(review): braces and original lines 86-92 (including the failure
 * path and final return) are missing from this extraction. */
81int _start(
int ac,
char **av)
 84 USE_IOP_MMIO_HWPORT();
 85 USE_SIF_MMIO_HWPORT();
 93 if ( (iop_mmio_hwport->iop_sbus_ctrl[0] & 8) == 0 )
 95 if ( sif_mmio_hwport->unk60 == 0x1D000060 )
 96 return RegisterLibraryEntries(&_exp_sifman) != 0;
 97 if ( (sif_mmio_hwport->unk60 & 0xFFFFF000) == 0 )
 98 return RegisterLibraryEntries(&_exp_sifman) != 0;
104static u32 get_msflag()
107 USE_SIF_MMIO_HWPORT();
109 for ( result = sif_mmio_hwport->msflag; result != sif_mmio_hwport->msflag; result = sif_mmio_hwport->msflag )
114static u32 get_smflag()
117 USE_SIF_MMIO_HWPORT();
119 for ( result = sif_mmio_hwport->smflag; result != sif_mmio_hwport->smflag; result = sif_mmio_hwport->smflag )
/* Fragment of the DMA2 one-time init path (enclosing function header is
 * missing): on first call it clears the legacy DMAC channel 2 control
 * register and enables that channel in DPCR1 (bit 0x800). */
 126 USE_IOP_MMIO_HWPORT();
 128 if ( !sif_dma2_inited )
 130 iop_mmio_hwport->dmac1.oldch[2].chcr = 0;
 131 iop_mmio_hwport->dmac1.dpcr1 |= 0x800;
/* Forward declaration of the SIF0 DMA queue initializer defined below. */
136static int sif_dma_init(
void);
/* Fragment of the main SIF init path (header missing): enables the SIF
 * DMA channels in DPCR2 (0x8800), clears both channel controls, walks
 * the SBUS handshake bits (0x10, then 1), then samples the EE-side
 * MSFLAG.  NOTE(review): braces and several lines are missing. */
 142 USE_IOP_MMIO_HWPORT();
 143 USE_SIF_MMIO_HWPORT();
 147 iop_mmio_hwport->dmac2.dpcr2 |= 0x8800;
 148 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
 149 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
 151 if ( (iop_mmio_hwport->iop_sbus_ctrl[0] & 0x10) != 0 )
 153 iop_mmio_hwport->iop_sbus_ctrl[0] |= 0x10;
 155 iop_mmio_hwport->iop_sbus_ctrl[0] |= 1;
 160 msflag = get_msflag();
/* Fragment of a second init/reset path: clears both SIF DMA channel
 * controls and repeats the SBUS bit-0x10 handshake.  Header missing. */
 173 USE_IOP_MMIO_HWPORT();
 177 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
 178 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
 179 if ( iop_mmio_hwport->iop_sbus_ctrl[0] & 0x10 )
 181 iop_mmio_hwport->iop_sbus_ctrl[0] |= 0x10;
191void sceSifSetDChain()
193 USE_IOP_MMIO_HWPORT();
194 USE_SIF_MMIO_HWPORT();
196 if ( (sif_mmio_hwport->controlreg & 0x40) == 0 )
197 sif_mmio_hwport->controlreg = 64;
198 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
199 iop_mmio_hwport->dmac2.newch[3].bcr = (iop_mmio_hwport->dmac2.newch[3].bcr & 0xFFFF0000) | 32;
200 iop_mmio_hwport->dmac2.newch[3].chcr = 0x41000300;
203void sceSifSetDmaIntrHandler(
void (*handler)(
void *userdata),
void *arg)
205 sifman_internals.dma_intr_handler = handler;
206 sifman_internals.dma_intr_handler_userdata = arg;
209void sceSifResetDmaIntrHandler()
211 sifman_internals.dma_intr_handler = NULL;
212 sifman_internals.dma_intr_handler_userdata = NULL;
/* SIF0 DMA interrupt handler (header, braces and several lines missing
 * from this extraction).  Visible behaviour: call the optional user
 * hook, run the queued completion callbacks of the just-finished buffer,
 * then -- if the channel is idle and new tags are pending -- restart the
 * SIF0 channel on the current tag buffer and swap the double-buffered
 * tag/callback sets. */
 217 void (*dma_intr_handler)(
void *);
 221 USE_IOP_MMIO_HWPORT();
 222 USE_SIF_MMIO_HWPORT();
/* First, the user hook installed via sceSifSetDmaIntrHandler. */
 224 dma_intr_handler = smi->dma_intr_handler;
 225 if ( dma_intr_handler )
 226 dma_intr_handler(smi->dma_intr_handler_userdata);
/* Run completion callbacks queued for the buffer that just drained.
 * sif_otherbufcom is re-fetched each iteration -- presumably because a
 * callback may queue more work; verify against full source. */
 227 sif_otherbufcom = smi->sif_otherbufcom;
 229 if ( sif_otherbufcom->count > 0 )
 237 sif_otherbufcom->info[v5].func(sif_otherbufcom->info[v5].userdata);
 238 sif_otherbufcom = smi->sif_otherbufcom;
 240 }
while ( v4 < sif_otherbufcom->
count );
 242 smi->sif_otherbufcom->count = 0;
/* If the channel stopped (CHCR busy bit 0x1000000 clear) and tags are
 * queued, rearm SIF0 on the current buffer and flip the double buffer. */
 243 if ( (iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000) == 0 && smi->dmatag_index > 0 )
 245 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
 246 iop_mmio_hwport->dmac2.newch[2].tadr = (uiptr)(smi->sif_curbuf);
 247 iop_mmio_hwport->dmac2.newch[2].bcr = (iop_mmio_hwport->dmac2.newch[2].bcr & 0xFFFF0000) | 32;
 248 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
 249 sif_mmio_hwport->controlreg = 32;
 251 smi->dmatag_index = 0;
 252 if ( smi->sif_curbuf == smi->sif_buf1 )
 254 smi->sif_curbuf = smi->sif_buf2;
 255 smi->sif_curbufcom = &smi->sif_bufcom2;
 256 p_sif_bufcom1 = &smi->sif_bufcom1;
 260 smi->sif_curbufcom = &smi->sif_bufcom1;
 261 p_sif_bufcom1 = &smi->sif_bufcom2;
 262 smi->sif_curbuf = smi->sif_buf1;
 264 smi->sif_otherbufcom = p_sif_bufcom1;
 265 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000701;
/* Initialize the sifman double-buffered DMA tag queue (buffer 1 current,
 * buffer 2 "other", counts zeroed, no user hook) and register the SIF0
 * DMA interrupt handler with sifman_internals as its argument.
 * NOTE(review): braces and the function tail (original lines 284+,
 * including the return value) are missing from this extraction. */
270static int sif_dma_init(
void)
 274 sifman_internals.sif_curbuf = sifman_internals.sif_buf1;
 275 sifman_internals.sif_curbufcom = &sifman_internals.sif_bufcom1;
 276 sifman_internals.dmatag_index = 0;
 277 sifman_internals.sif_bufcom1.count = 0;
 278 sifman_internals.sif_bufcom2.count = 0;
 279 sifman_internals.sif_otherbufcom = &sifman_internals.sif_bufcom2;
 280 sifman_internals.dma_intr_handler = 0;
 281 sifman_internals.dma_intr_handler_userdata = 0;
 283 RegisterIntrHandler(IOP_IRQ_DMA_SIF0, 1, (
int (*)(
void *))sifman_interrupt_handler, &sifman_internals);
/* Fragment of the per-transfer tag builder (function header missing):
 * packs the source address, word count and attribute bits into the next
 * queue slot, then bumps and returns the tag index.  attr&2 sets bit
 * 0x40000000 on the data word; attr&4 sets bit 0x80000000 on the count.
 * NOTE(review): the lines defining v4 and v5 are missing. */
 296 v1 = &sifman_internals.sif_curbuf[sifman_internals.dmatag_index];
 297 v2 = (int)a1->src & 0xFFFFFF;
 301 if ( (a1->attr & 2) != 0 )
 302 v1->data = v2 | 0x40000000;
 303 v1->words = v4 & 0xFFFFFF;
 307 v1->count = v5 | 0x10000000;
 308 if ( (a1->attr & 4) != 0 )
 309 v1->count |= 0x80000000;
/* Destination is masked to the EE physical address space (0x1FFFFFFF). */
 310 v1->addr = (int)a1->dest & 0x1FFFFFFF;
 311 return ++sifman_internals.dmatag_index;
/* Core transfer-queuing routine (set_dma_inner; header and many lines
 * missing): appends `count` tags to the current buffer, optionally
 * records a completion callback, and -- if the SIF0 channel is idle --
 * starts it on the queued tags and flips the double buffer.  The visible
 * return combines (dma_count << 16) | (dmatag_index << 8) | count into a
 * transfer id.  NOTE(review): locals (v14, i, dmat advance), interrupt
 * masking and several branch lines are missing; code kept verbatim. */
 321 USE_IOP_MMIO_HWPORT();
 322 USE_SIF_MMIO_HWPORT();
/* Reject when fewer than `count` of the 32 tag slots remain. */
 324 if ( 32 - sifman_internals.dmatag_index <
count )
 326 dmatag_index = sifman_internals.dmatag_index;
 327 dma_count = sifman_internals.dma_count;
/* Clear the end-of-chain bit on the previous last tag before appending. */
 328 if ( sifman_internals.dmatag_index )
 329 sifman_internals.sif_curbuf[sifman_internals.dmatag_index - 1].data &= ~0x80000000;
 330 for ( i = 0; i <
count; ++dmat )
 332 sif_dma_setup_tag(dmat);
/* Mark the new last tag as end-of-chain. */
 335 sifman_internals.sif_curbuf[sifman_internals.dmatag_index - 1].data |= 0x80000000;
 338 sifman_internals.sif_curbufcom->info[sifman_internals.sif_curbufcom->count].func = func;
 339 sifman_internals.sif_curbufcom->info[sifman_internals.sif_curbufcom->count++].userdata = data;
 341 v14 = dma_count << 16;
/* Kick the channel only when it is fully idle (busy bit clear, MADR of
 * the shadow channel zero, DICR2 interrupt bit clear). */
 342 if ( (iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000) == 0 )
 344 v14 = dma_count << 16;
 345 if ( iop_mmio_hwport->dmac2.new_unusedch.madr == 0 )
 347 v14 = dma_count << 16;
 348 if ( (iop_mmio_hwport->dmac2.dicr2 & 0x4000000) == 0 )
 350 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
 351 iop_mmio_hwport->dmac2.newch[2].tadr = (uiptr)(sifman_internals.sif_curbuf);
 352 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
 353 sif_mmio_hwport->controlreg = 32;
 354 iop_mmio_hwport->dmac2.newch[2].bcr = (iop_mmio_hwport->dmac2.newch[2].bcr & 0xFFFF0000) | 32;
 355 sifman_internals.dmatag_index = 0;
 356 ++sifman_internals.dma_count;
/* Double-buffer flip, mirroring the interrupt handler's logic. */
 357 if ( sifman_internals.sif_curbuf == sifman_internals.sif_buf1 )
 359 sifman_internals.sif_curbuf = sifman_internals.sif_buf2;
 360 sifman_internals.sif_curbufcom = &sifman_internals.sif_bufcom2;
 361 p_sif_bufcom1 = &sifman_internals.sif_bufcom1;
 365 sifman_internals.sif_curbuf = sifman_internals.sif_buf1;
 366 sifman_internals.sif_curbufcom = &sifman_internals.sif_bufcom1;
 367 p_sif_bufcom1 = &sifman_internals.sif_bufcom2;
 369 sifman_internals.sif_otherbufcom = p_sif_bufcom1;
 370 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000701;
 371 v14 = dma_count << 16;
 375 return v14 | (dmatag_index << 8) | (u8)
count;
/* Fragment of sceSifSetDma (header missing): queues transfers with no
 * completion callback by delegating to set_dma_inner(dmat, count, 0, 0). */
 380 return set_dma_inner(dmat,
count, 0, 0);
383unsigned int sceSifSetDmaIntr(
SifDmaTransfer_t *dmat,
int len,
void (*func)(),
void *data)
385 return set_dma_inner(dmat, len, func, data);
/* Classify a transfer id (as returned by set_dma_inner) against hardware
 * state and the running dma_count.  NOTE(review): braces and the return
 * statements carrying the actual status codes are missing from this
 * extraction, so the exact return values cannot be confirmed here. */
388static int dma_stat_inner(
unsigned int a1)
 390 USE_IOP_MMIO_HWPORT();
 392 if ( (iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000) == 0 && !iop_mmio_hwport->dmac2.new_unusedch.madr )
 394 if ( (iop_mmio_hwport->dmac2.dicr2 & 0x4000000) == 0 )
 397 if ( sifman_internals.dma_count != ((a1 >> 16) & 0xFFFF) )
 399 if ( sifman_internals.dma_count == (u16)(((a1 >> 16) & 0xFFFF) + 1) )
/* Public wrapper.  The two distinct call sites (direct return vs. v2 =
 * ...) suggest an interrupt-context branch whose surrounding lines are
 * missing -- verify against the full source. */
406int sceSifDmaStat(
int trid)
 412 return dma_stat_inner(trid);
 414 v2 = dma_stat_inner(trid);
/* One-shot send path (function header missing): builds a single DMA tag
 * in sifman_internals.one from a SifDmaTransfer_t taken by value, then
 * immediately starts the SIF0 channel on that tag in chain mode.
 * NOTE(review): the line defining v3 is missing; also note the dest mask
 * here is 0xFFFFFFF, unlike 0x1FFFFFFF in the queued-tag builder --
 * cannot tell from this extraction whether that is intentional. */
 424 USE_IOP_MMIO_HWPORT();
 425 USE_SIF_MMIO_HWPORT();
 427 v1 = ((int)dmat.src & 0xFFFFFF) | 0x80000000;
/* Word count: size/4, rounded up when size is not a multiple of 4. */
 428 v2 = ((
unsigned int)dmat.size >> 2) + ((dmat.size & 3) != 0);
 429 sifman_internals.one.data = v1;
 430 sifman_internals.one.words = v2 & 0xFFFFFF;
 431 if ( (dmat.attr & 2) != 0 )
 432 sifman_internals.one.data = v1 | 0x40000000;
 436 sifman_internals.one.count = v3 | 0x10000000;
 437 if ( (dmat.attr & 4) != 0 )
 438 sifman_internals.one.count |= 0x80000000;
 439 sifman_internals.one.addr = (int)dmat.dest & 0xFFFFFFF;
 440 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
 441 sif_mmio_hwport->controlreg = 32;
 442 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
 443 iop_mmio_hwport->dmac2.newch[2].tadr = (uiptr)&sifman_internals.one;
 444 iop_mmio_hwport->dmac2.newch[2].bcr = (iop_mmio_hwport->dmac2.newch[2].bcr & 0xFFFF0000) | 32;
 445 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000701;
/* Fragment (header missing): busy-wait until the SIF0 channel's CHCR
 * busy bit (0x1000000) clears. */
 450 USE_IOP_MMIO_HWPORT();
 452 while ( iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000 )
/* Fragment (header missing): non-blocking check of the same busy bit. */
 458 USE_IOP_MMIO_HWPORT();
 460 return iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000;
/* Start a plain (non-chained) transfer on the SIF0 channel: word count
 * rounded up from `size`, MADR set from `addr`, CHCR = 0x1000201.
 * NOTE(review): braces and the line computing v5 (block/count split when
 * v4 is not a multiple of 32) are missing; `mode` is unused in the
 * visible lines. */
463void sceSifDma0Transfer(
void *addr,
int size,
int mode)
 467 USE_IOP_MMIO_HWPORT();
 468 USE_SIF_MMIO_HWPORT();
 472 v4 = ((
unsigned int)size >> 2) + ((size & 3) != 0);
 473 if ( (sif_mmio_hwport->controlreg & 0x20) == 0 )
 474 sif_mmio_hwport->controlreg = 32;
 475 iop_mmio_hwport->dmac2.newch[2].chcr = 0;
 476 iop_mmio_hwport->dmac2.newch[2].madr = (
unsigned int)addr & 0xFFFFFF;
 477 if ( (v4 & 0x1F) != 0 )
 481 iop_mmio_hwport->dmac2.newch[2].bcr = ((v5 & 0xFFFF) << 16) | 32;
 482 iop_mmio_hwport->dmac2.newch[2].chcr = 0x1000201;
/* Fragment (header missing): busy-wait for SIF0 channel completion. */
 487 USE_IOP_MMIO_HWPORT();
 489 while ( iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000 )
493int sceSifDma0Sending()
495 USE_IOP_MMIO_HWPORT();
497 return iop_mmio_hwport->dmac2.newch[2].chcr & 0x1000000;
/* Start a transfer on the SIF1 channel (dmac2 channel 3): word count
 * rounded up from `size`, MADR from `addr`, direction/extra CHCR bits
 * selected by `mode` (bit 0x10 visible).  NOTE(review): braces and the
 * lines computing v5 and v6 are missing from this extraction. */
500void sceSifDma1Transfer(
void *addr,
int size,
int mode)
 505 USE_IOP_MMIO_HWPORT();
 506 USE_SIF_MMIO_HWPORT();
 508 v4 = ((
unsigned int)size >> 2) + ((size & 3) != 0);
 509 if ( (sif_mmio_hwport->controlreg & 0x40) == 0 )
 510 sif_mmio_hwport->controlreg = 64;
 511 iop_mmio_hwport->dmac2.newch[3].chcr = 0;
 512 iop_mmio_hwport->dmac2.newch[3].madr = (
unsigned int)addr & 0xFFFFFF;
 513 if ( (v4 & 0x1F) != 0 )
 517 iop_mmio_hwport->dmac2.newch[3].bcr = ((v5 & 0xFFFF) << 16) | 32;
 518 if ( (mode & 0x10) != 0 )
 522 iop_mmio_hwport->dmac2.newch[3].chcr = v6 | 0x200;
/* Fragment (header missing): busy-wait for SIF1 channel completion. */
 527 USE_IOP_MMIO_HWPORT();
 529 while ( iop_mmio_hwport->dmac2.newch[3].chcr & 0x1000000 )
533int sceSifDma1Sending()
535 USE_IOP_MMIO_HWPORT();
537 return iop_mmio_hwport->dmac2.newch[3].chcr & 0x1000000;
/* Start a transfer on the legacy DMA2 channel (dmac1 old channel 2):
 * word count rounded up from `size`, MADR from `addr`, BCR split into
 * block size/count, CHCR assembled from `mode` bits 1 and 0x10.
 * NOTE(review): braces and the lines computing v5, v6 and v7 are
 * missing from this extraction. */
540void sceSifDma2Transfer(
void *addr,
int size,
int mode)
 546 USE_IOP_MMIO_HWPORT();
 547 USE_SIF_MMIO_HWPORT();
 549 v4 = ((
unsigned int)size >> 2) + ((size & 3) != 0);
 550 if ( (sif_mmio_hwport->controlreg & 0x80) == 0 )
 551 sif_mmio_hwport->controlreg = 128;
 553 iop_mmio_hwport->dmac1.oldch[2].chcr = 0;
 554 iop_mmio_hwport->dmac1.oldch[2].madr = (
unsigned int)addr & 0xFFFFFF;
 557 if ( (v4 & 0x1F) != 0 )
 561 iop_mmio_hwport->dmac1.oldch[2].bcr = ((v6 & 0xFFFF) << 16) | (v5 & 0xFFFF);
 562 if ( (mode & 1) != 0 )
 570 if ( (mode & 0x10) != 0 )
 576 iop_mmio_hwport->dmac1.oldch[2].chcr = v7;
/* Fragment (header missing): busy-wait for DMA2 channel completion. */
 581 USE_IOP_MMIO_HWPORT();
 583 while ( iop_mmio_hwport->dmac1.oldch[2].chcr & 0x1000000 )
587int sceSifDma2Sending()
589 USE_IOP_MMIO_HWPORT();
591 return iop_mmio_hwport->dmac1.oldch[2].chcr & 0x1000000;
/* Write `val` to the MSFLAG handshake register.  NOTE(review): braces
 * and the return statement (signature says u32 -- presumably a read-back
 * of the register) fall on lines missing from this extraction. */
599u32 sceSifSetMSFlag(u32 val)
 601 USE_SIF_MMIO_HWPORT();
 603 sif_mmio_hwport->msflag = val;
/* Write `val` to the SMFLAG handshake register; same missing-return
 * caveat as sceSifSetMSFlag above. */
612u32 sceSifSetSMFlag(u32 val)
 614 USE_SIF_MMIO_HWPORT();
 616 sif_mmio_hwport->smflag = val;
620u32 sceSifGetMainAddr()
622 USE_SIF_MMIO_HWPORT();
624 return sif_mmio_hwport->mscom;
627u32 sceSifGetSubAddr()
629 USE_SIF_MMIO_HWPORT();
631 return sif_mmio_hwport->smcom;
634u32 sceSifSetSubAddr(u32 addr)
636 USE_SIF_MMIO_HWPORT();
638 sif_mmio_hwport->smcom = addr;
639 return sif_mmio_hwport->smcom;
/* Fragment (header missing): pulses SBUS control bit 1 -- set, then
 * cleared again -- presumably signalling the other side; confirm against
 * the full source. */
 645 USE_IOP_MMIO_HWPORT();
 647 v0 = iop_mmio_hwport->iop_sbus_ctrl[0];
 648 iop_mmio_hwport->iop_sbus_ctrl[0] = v0 | 2;
 650 iop_mmio_hwport->iop_sbus_ctrl[0] = v0 & 0xFFFFFFFD;
/* NOTE(review): residue below -- intrman prototypes and unrelated text
 * fused in by the broken extraction; not part of this module's code. */
int CpuResumeIntr(int state)
int RegisterIntrHandler(int irq, int mode, int(*handler)(void *), void *arg)
int ReleaseIntrHandler(int irq)
int QueryIntrContext(void)
int DisableIntr(int irq, int *res)
int CpuSuspendIntr(int *state)
u32 count
start sector of fragmented bd/file