#include "accore_internal.h"
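
/*
 * The request queue is an intrusive doubly linked ring: Dmac.requestq is the
 * list head and every request (acDmaData) embeds its own link as d_chain.
 * A minimal sketch of the node layout this code assumes -- field order and
 * any typedef names beyond q_prev/q_next are illustrative, not taken from
 * accore_internal.h:
 *
 *     typedef struct acQueue {
 *         struct acQueue *q_prev;
 *         struct acQueue *q_next;
 *     } acQueueData, *acQueueT;
 */

/*
 * dma_cancel: detach a pending request from the controller's request queue
 * and report the failure to its owner through the ops->do_error callback.
 */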
static int dma_cancel(struct dma_softc *dmac, acDmaT dma, int intr, int result)
    d_state = dma->d_state;
    if ( (unsigned int)d_state <= 3u )
    /* ... */
    /* unlink the request from the controller's request queue */
    q_next = dma->d_chain.q_next;
    q_prev = dma->d_chain.q_prev;
    q_prev->q_next = q_next;
    q_next->q_prev = q_prev;
    /* ... */
    ops->do_error(dma, intr, (acDmaState)d_state, result);
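
/*
 * dma_xfer: transfer callback that dma_request passes to ops->do_xfer.  It
 * splits the transfer according to the request's slice attribute, programs
 * IOP DMA channel 8 (DPCR priority from the upper bits of d_attr, direction
 * from bit 0) and writes the target I/O address into the memory-mapped
 * register at 0x1F801410.
 */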
static int dma_xfer(acDmaT dma, void *ioptr, void *buf, int count)
    if ( count >= 4 << slice )
        ret = count >> (slice + 2);     /* number of blocks */
        slice_v2 = 1 << slice;          /* words per block */
    /* ... */
    if ( dma == (acDmaT)Dmac.requestq.q_next && dma->d_state == 2 )
    /* ... */
    /* tune the AC I/O delay register for the transfer target */
    v12 = GetAcIoDelayReg() & 0x80FFDFFF;
    if ( ioptr == (void *)0xB6000000 )
        /* ... */
    SetAcIoDelayReg(v12 | v13);
    /* set channel 8 priority and start the sliced transfer */
    dmac_ch_set_dpcr(8u, (unsigned int)attr >> 5);
    v14 = dmac_request(8u, buf, slice_v2, ret, attr & 1);
    /* ... */
    *((volatile acUint32 *)0x1F801410) = ((unsigned int)ioptr & 0x3FFFFFF) | 0x14000000;
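
/*
 * The fragment below belongs to dma_request(), whose signature is not part of
 * this excerpt; judging from the call sites further down it is roughly
 *
 *     static int dma_request(struct dma_softc *dmac, acDmaT dma, int intr);
 *
 * Calling it with dma == 0 apparently services the head of the request queue
 * instead of enqueueing a new request.
 */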
    d_state = dma->d_state;
    /* ... */
    q_next = (acDmaT)dmac->requestq.q_next;
    d_state = AC_DMA_STATE_READY;
    if ( dmac == (struct dma_softc *)q_next )   /* request queue is empty */
        d_state = AC_DMA_STATE_FREE;
    /* ... */
    dma = (acDmaT)dmac->requestq.q_next;
    /* ... */
    if ( d_state == AC_DMA_STATE_QUEUE )
    /* ... */
    if ( d_state != AC_DMA_STATE_READY )
    /* ... */
    if ( d_state == AC_DMA_STATE_XFER )
    /* ... */
    v10 = dmac->requestq.q_next;
    state_v5 = (acDmaT)dmac->requestq.q_prev;
    /* append the new request at the tail of the request queue */
    dma->d_chain.q_next = &dmac->requestq;
    v12 = (unsigned int)dmac ^ (unsigned int)v10;   /* non-zero if the queue was not empty */
    /* ... */
    dma->d_chain.q_prev = (acQueueT)state_v5;
    state_v5->d_chain.q_next = &dma->d_chain;
    dmac->requestq.q_prev = &dma->d_chain;
    /* ... */
    if ( dma == (acDmaT)dmac->requestq.q_next )
    /* ... */
    ret = ops->do_xfer(dma, intr, dma_xfer);
    /* ... */
    if ( dma->d_state == 3 )
    /* ... */
    v13 = dma->d_state + ret;
    /* ... */
    dma_cancel(dmac, dma, intr, ret);
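
/*
 * Public entry points.  The plain variants are meant for thread context and
 * the ...I variants for interrupt context; each queues (or cancels) the given
 * request and then retries dma_request(&Dmac, 0, intr) until the queue has
 * been kicked successfully.  The elided lines in the thread-context variants
 * presumably bracket the call with CpuSuspendIntr()/CpuResumeIntr() (declared
 * at the end of this listing); that bracketing is an assumption, not visible
 * in the excerpt.
 */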
int acDmaRequest(acDmaT dma)
    /* ... */
    ret = dma_request(&Dmac, dma, 0);
    /* ... */
    while ( dma_request(&Dmac, 0, 0) < 0 )

int acDmaRequestI(acDmaT dma)
    /* ... */
    ret = dma_request(&Dmac, dma, 1);
    /* ... */
    while ( dma_request(&Dmac, 0, 1) < 0 )

int acDmaCancel(acDmaT dma, int result)
    /* ... */
    ret = dma_cancel(&Dmac, dma, 0, result);
    /* ... */
    while ( dma_request(&Dmac, 0, 0) < 0 )

int acDmaCancelI(acDmaT dma, int result)
    /* ... */
    ret = dma_cancel(&Dmac, dma, 1, result);
    /* ... */
    while ( dma_request(&Dmac, 0, 1) < 0 )
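
/*
 * dma_intr: DMA completion interrupt handler.  If the request queue is not
 * empty it takes the head request, reports completion through ops->do_done
 * when the request was still in the XFER state (ops->do_error with -13
 * otherwise), and then kicks the next queued request.
 */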
static int dma_intr(void *arg)
    /* ... */
    q_next = (acDmaData *)argt->requestq.q_next;
    if ( argt != (struct dma_softc *)q_next )
        /* ... */
        v3 = q_next->d_chain.q_next;
        q_prev = q_next->d_chain.q_prev;
        state = q_next->d_state;
        /* ... */
        if ( state == AC_DMA_STATE_XFER )
            ops->do_done(q_next);
        /* ... */
        ops->do_error(q_next, 1, state, -13);
        /* ... */
        while ( dma_request(&Dmac, 0, 1) < 0 )

    dma->d_attr = (32 * priority) | (output != 0);
    /* ... */
    while ( ((unsigned int)1 << v6) < x )
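
/*
 * The two fragments above are from the request-setup path (its signature is
 * not part of this excerpt).  d_attr packs the transfer direction into bit 0
 * and the DPCR priority into bits 5 and up (dma_xfer passes attr >> 5 to
 * dmac_ch_set_dpcr and attr & 1 to dmac_request), and the while loop rounds x
 * up to a power-of-two exponent, presumably the slice value used by dma_xfer.
 * A self-contained sketch of that rounding -- the helper name is illustrative,
 * not from the source:
 */
static unsigned int slice_for_size(unsigned int x)
{
    unsigned int v6 = 0;

    /* smallest v6 such that (1u << v6) >= x */
    while ( ((unsigned int)1 << v6) < x )
        ++v6;
    return v6;
}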

int acDmaModuleStart(int argc, char **argv)
    /* ... */
    /* start with an empty, self-linked request queue */
    Dmac.requestq.q_prev = (acQueueT)&Dmac;
    Dmac.requestq.q_next = (acQueueT)&Dmac;
    if ( !ret || ret == -104 )
        /* ... */
    msg = "dma_intr_enable";
    /* ... */
    msg = "dma_intr_register";
    /* ... */
    printf("accore:dma_init:%s: error %d\n", msg, ret);

int acDmaModuleRestart(int argc, char **argv)
    /* ... */

int acDmaModuleStatus()
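
/*
 * Imported IOP kernel services (intrman), presumably used to register the DMA
 * interrupt handler in acDmaModuleStart and to mask interrupts around
 * request-queue manipulation.
 */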
int CpuResumeIntr(int state);
int RegisterIntrHandler(int irq, int mode, int (*handler)(void *), void *arg);
int ReleaseIntrHandler(int irq);
int DisableIntr(int irq, int *res);
int CpuSuspendIntr(int *state);