diff -uprN prev/drivers/char/ppdev.c new/drivers/char/ppdev.c
--- prev/drivers/char/ppdev.c	2012-10-14 23:41:04.000000000 +0200
+++ new/drivers/char/ppdev.c	2012-10-16 16:03:18.000000000 +0200
@@ -104,6 +104,300 @@ static inline void pp_enable_irq (struct
 	port->ops->enable_irq (port);
 }
 
+struct pp_ki
+{
+	int len_real;
+	void * buffer;
+	struct kiocb *cb;
+	const struct iovec *iov;
+	struct parport *pport;
+	unsigned long count;
+};
+
+
+/**
+ * pp_aio_cancel - cancel an async parport_read/write
+ * Context: !in_interrupt()
+ *
+ * May fail with -EAGAIN if the transfer has already started.
+ */
+static int pp_aio_cancel(struct kiocb *iocb, struct io_event *e)
+{
+	struct pp_ki * u=iocb->private;
+	e->res=0;
+	e->res2=-EAGAIN;
+	if(!u->pport->ops->cancel_transaction(u->pport, u->buffer))
+		return -EAGAIN;
+	e->res=0;
+	e->res2=-ECANCELED;
+	kfree(u->buffer);
+	kfree(u);
+	return 0;
+}
+
+/**
+ * pp_aio_read_retry - entry point for copying and returning read data
+ *
+ * The aio core sets the mm context up so that copy_to_user works as expected here.
+ */
+static ssize_t pp_aio_read_retry(struct kiocb *iocb)
+{
+	struct pp_ki * u=iocb->private;
+	ssize_t len, total;
+	void *to_copy;
+	int i;
+
+	total = u->len_real;
+	len = 0;
+	to_copy = u->buffer;
+	for (i=0; i < u->count; i++) {
+		ssize_t this = min((ssize_t)(u->iov[i].iov_len), total);
+
+		if (copy_to_user(u->iov[i].iov_base, to_copy, this)) {
+			if (len == 0)
+				len = -EFAULT;
+			break;
+		}
+
+		total -= this;
+		len += this;
+		to_copy += this;
+		if (total == 0)
+			break;
+	}
+	kfree(u->buffer);
+	kfree(u);
+	return len;
+}
+
+/**
+ * pp_aio_read_cb - gets called by the parport driver
+ *
+ */
+static void pp_aio_read_cb(int *data,void * buffer,int len_real)
+{
+	struct pp_ki * u=(struct pp_ki *)data;
+	struct kiocb * cb=u->cb;
+	u->len_real=len_real;
+	cb->private=u;
+	kick_iocb(cb);
+}
+
+/**
+ * pp_aio_read - submits an async read request
+ * @iocb: the aio control block to work on
+ * @iov: io vector to fill
+ * @count: size of the io vector
+ * @o: file offset, ignored
+ * Context: !in_interrupt()
+ *
+ * Uses the async parport interface. Requires the peripheral to use ECP handshakes.
+ * Submits the request to the lowlevel parport driver,
+ * sets the retry function and returns -EIOCBRETRY.
+ * The notification callback pp_aio_read_cb() will later kick_iocb() to
+ * retry and return the read data.
+ * May return -ENOMEM if allocation of the dma buffer or user data fails.
+ * May read less data than asked for if DMA memory is low or the request is too big.
+ * May return other errors if parport_submit_transaction() fails for any reason.
+ * Page migration would be nice.
+ */
+static ssize_t pp_aio_read (struct kiocb * iocb, const struct iovec * iov,
+			unsigned long count, loff_t o)
+{
+	struct file* file;
+	struct pp_struct * pp;
+	struct parport *pport;
+	struct pp_ki * u;
+	char * bufptr;
+	int len,rv;
+	file=iocb->ki_filp;
+	if(!file)
+		return -EBADF;
+	pp=file->private_data;
+	if(!pp)
+		return -EBADF;
+	pport=pp->pdev->port;
+	if(!pport)
+		return -EBADF;
+
+	if (!(pp->flags & PP_CLAIMED)) {
+		/* Don't have the port claimed */
+		printk (KERN_DEBUG CHRDEV " claim the port first\n");
+		return -EINVAL;
+	}
+
+	if(!pport->ops->submit_transaction)
+	{
+		return -ENOSYS;
+	}
+
+	len=iov_length(iov,count);
+	bufptr=kmalloc(len,GFP_DMA);
+	//degrade gracefully
+	while((len>PAGE_SIZE)&&(!bufptr))
+	{
+		len/=2;
+		if(len<PAGE_SIZE)
+			len=PAGE_SIZE;
+		bufptr=kmalloc(len,GFP_DMA);
+	}
+	if(!bufptr)
+		return -ENOMEM;
+	u=kmalloc(sizeof(struct pp_ki),GFP_KERNEL);
+	if(!u)
+	{
+		kfree(bufptr);
+		return -ENOMEM;
+	}
+	u->buffer=bufptr;
+	u->cb=iocb;
+	u->iov=iov;
+	u->count=count;
+	u->pport=pport;
+	iocb->private=u;
+	iocb->ki_cancel = pp_aio_cancel;
+	iocb->ki_retry=pp_aio_read_retry;
+
+	rv=pport->ops->submit_transaction(pport, bufptr, len, 0, PARPORT_AIO_ACT_ECP_R_D, (int *)u, pp_aio_read_cb);
+	if(rv)
+	{
+		kfree(bufptr);
+		kfree(u);
+		iocb->private=NULL;
+		return rv;
+	}
+	return -EIOCBRETRY;
+}
+
+/**
+ * pp_aio_write_cb - gets called by the parport driver
+ *
+ */
+static void pp_aio_write_cb(int *data,void * buf,int len_real)
+{
+	struct pp_ki * u=(struct pp_ki *)data;
+	struct kiocb * cb=u->cb;
+	u->len_real=len_real;
+	cb->private = NULL;
+	aio_complete(cb, len_real, 0);
+	kfree(u->buffer);
+	kfree(u);
+}
+
+/**
+ * pp_aio_write - submits an async write request
+ * @iocb: the aio control block to work on
+ * @iov: io vector containing the data to write
+ * @count: size of the io vector
+ * @o: file offset, ignored
+ * Context: !in_interrupt()
+ *
+ * Very similar to pp_aio_read().
+ * Does not use or need a retry.
+ */
+static ssize_t pp_aio_write (struct kiocb * iocb, const struct iovec * iov,
+			unsigned long count, loff_t o)
+{
+	struct file* file;
+	struct pp_struct * pp;
+	struct parport *pport;
+	struct pp_ki * u;
+	ssize_t len, total;
+	void *to_copy;
+	int i;
+	char * bufptr;
+	int rv;
+	file=iocb->ki_filp;
+	if(!file)
+		return -EBADF;
+	pp=file->private_data;
+	if(!pp)
+		return -EBADF;
+	pport=pp->pdev->port;
+	if(!pport)
+		return -EBADF;
+
+	if (!(pp->flags & PP_CLAIMED)) {
+		/* Don't have the port claimed */
+		printk (KERN_DEBUG CHRDEV " claim the port first\n");
+		return -EINVAL;
+	}
+
+	if(!pport->ops->submit_transaction)
+	{
+		return -ENOSYS;
+	}
+
+	len=iov_length(iov,count);
+	bufptr=kmalloc(len,GFP_DMA);
+	//degrade gracefully
+	while((len>PAGE_SIZE)&&(!bufptr))
+	{
+		len/=2;
+		if(len<PAGE_SIZE)
+			len=PAGE_SIZE;
+		bufptr=kmalloc(len,GFP_DMA);
+	}
+	if(!bufptr)
+		return -ENOMEM;
+	u=kmalloc(sizeof(struct pp_ki),GFP_KERNEL);
+	if(!u)
+	{
+		kfree(bufptr);
+		return -ENOMEM;
+	}
+	u->buffer=bufptr;
+	u->cb=iocb;
+	u->iov=iov;
+	u->count=count;
+	u->pport=pport;
+	iocb->private=u;
+	iocb->ki_cancel = pp_aio_cancel;
+	iocb->ki_retry=pp_aio_read_retry;
+
+	total = len;
+	len = 0;
+	to_copy = u->buffer;
+	for (i=0; i < u->count; i++) {
+		ssize_t this = min((ssize_t)(u->iov[i].iov_len), total);
+
+		if (copy_from_user(to_copy, u->iov[i].iov_base, this)) {
+			if (len == 0)
+				len = -EFAULT;
+			break;
+		}
+
+		total -= this;
+		len += this;
+		to_copy += this;
+		if (total == 0)
+			break;
+	}
+
+	if(len<0)
+	{
+		kfree(bufptr);
+		kfree(u);
+		iocb->private=NULL;
+		return len;
+	}
+
+	rv=pport->ops->submit_transaction(pport, bufptr, len, 0, PARPORT_AIO_ACT_ECP_W_D, (int *)u, pp_aio_write_cb);
+	if(rv)
+	{
+		kfree(bufptr);
+		kfree(u);
+		iocb->private=NULL;
+		return rv;
+	}
+
+	return -EIOCBQUEUED;
+}
+
 static ssize_t pp_read (struct file * file, char __user * buf,
 			size_t count, loff_t * ppos)
 {
@@ -750,6 +1044,8 @@ static const struct file_operations pp_f
 	.unlocked_ioctl	= pp_ioctl,
 	.open		= pp_open,
 	.release	= pp_release,
+	.aio_read	= pp_aio_read,
+	.aio_write	= pp_aio_write,
 };
 
 static void pp_attach(struct parport *port)
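The two new ppdev entry points above are reached through the in-kernel AIO path, i.e. from io_submit(2). The following is not part of the patch: a minimal user-space sketch, assuming libaio (link with -laio), an ECP-capable peripheral on /dev/parport0, and with error handling mostly omitted, of how the asynchronous read could be driven.

/*
 * Not part of the patch: user-space sketch driving ppdev's new
 * .aio_read through libaio. Assumes /dev/parport0 and an ECP peripheral.
 */
#include <libaio.h>
#include <linux/ppdev.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	char buf[512];
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	io_context_t ctx = 0;
	int fd = open("/dev/parport0", O_RDWR);

	if (fd < 0)
		return 1;
	ioctl(fd, PPCLAIM);			/* port must be claimed first */
	io_setup(1, &ctx);

	/* queue an asynchronous ECP read; the file offset is ignored by ppdev */
	io_prep_pread(&cb, fd, buf, sizeof(buf), 0);
	io_submit(ctx, 1, cbs);

	/* ... do other work, then reap the completion ... */
	io_getevents(ctx, 1, 1, &ev, NULL);
	printf("read %ld bytes\n", (long)ev.res);

	io_destroy(ctx);
	ioctl(fd, PPRELEASE);
	close(fd);
	return 0;
}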
diff -uprN prev/drivers/parport/parport_pc.c new/drivers/parport/parport_pc.c
--- prev/drivers/parport/parport_pc.c	2012-10-14 23:41:04.000000000 +0200
+++ new/drivers/parport/parport_pc.c	2012-10-18 07:16:58.000000000 +0200
@@ -892,6 +892,777 @@ static size_t parport_pc_ecp_write_block
 	return written;
 }
 
+
+static int parport_end_dma(struct parport *port,struct parport_aio_te *te);
+static int parport_finish_dma(struct parport *port,struct parport_aio_te *te);
+static int parport_init_dma(struct parport *port,struct parport_aio_te *te);
+static int parport_start_dma(struct parport *port,struct parport_aio_te *te);
+
+/* ecp_forward_to_reverse - reverses the link in ECP mode. The AUTOFD, STROBE and SELECTIN DCR bits should be zero before doing this.
+
+Extended Capabilities Port: Specifications
+Revision 1.06
+July 14, 1993
+Microsoft Corporation
+
+page 34:
+
+1. Complete the current forward transfer.
+2. Place the ECP port into PS2 mode (001).
+3. Set the direction bit to 1 (reverse), causing the ECP port data drivers to tri-state.
+4. Set the ECP port into ECP mode (011), enabling the hardware assist.
+5. Write to the DCR, causing nInit to go low. This requests a reverse transfer from the
+   peripheral.
+6. The peripheral will drive pe low when it has started the reverse transfer. Hardware will
+   automatically move data into the ECP FIFO from the ECP data lines.
+7. Set up a ReadString or execute a ReadByte operation.
+
+Steps 1 and 2 should be done before calling this function.
+*/
+static
+int ecp_forward_to_reverse (struct parport *port)
+{
+	int retval;
+
+/* 3. Set the direction bit to 1 (reverse), causing the ECP port data drivers to tri-state. */
+	parport_data_reverse (port);
+
+/* 4. Set the ECP port into ECP mode (011), enabling the hardware assist. */
+	frob_econtrol (port, (7<<5), (ECR_ECP<<5));
+
+/* 5. Write to the DCR, causing nInit to go low. This requests a reverse transfer from the
+   peripheral. */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_INIT,
+			      0);
+
+/* 6. The peripheral will drive pe low when it has started the reverse transfer. Hardware will
+   automatically move data into the ECP FIFO from the ECP data lines. */
+	retval = parport_wait_peripheral (port,
+					  PARPORT_STATUS_PAPEROUT, 0);
+
+	if (!retval) {
+		DPRINTK (KERN_DEBUG "%s: ECP direction: reverse\n",
+			 port->name);
+		port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+	} else {
+		parport_data_forward (port);
+		DPRINTK (KERN_DEBUG "%s: ECP direction: failed to reverse\n",
+			 port->name);
+		port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
+	}
+
+	return retval;
+}
+
+/* ecp_reverse_to_forward - reverses the link in ECP mode. The AUTOFD, STROBE and SELECTIN DCR bits should be zero before doing this.
+
+Extended Capabilities Port: Specifications
+Revision 1.06
+July 14, 1993
+Microsoft Corporation
+
+page 36:
+
+Reverse to Forward Negotiation
+After the ECP port has moved data in ECP mode (011) in the reverse direction and a change of
+direction is required, the following steps must be taken:
+1. First, negotiate the state of the ECP port (the peripheral) back into forward mode. This is
+   done by setting nInit high and waiting for the state of pe to go high. This causes the peripheral
+   to terminate any ongoing reverse transfer.
+2. The mode of the ECP port is changed to PS2 mode 001.
+3. The direction bit is changed to 0. At this point, the bus and the ECP port are in the
+   forward-idle state.
+*/
+
+static
+int ecp_reverse_to_forward (struct parport *port)
+{
+	int retval;
+
+	/* 1. First, negotiate the state of the ECP port (the peripheral) back into forward mode. This is
+	   done by setting nInit high and waiting for the state of pe to go high. This causes the peripheral
+	   to terminate any ongoing reverse transfer. */
+	parport_frob_control (port,
+			      PARPORT_CONTROL_INIT,
+			      PARPORT_CONTROL_INIT);
+
+	retval = parport_wait_peripheral (port,
+					  PARPORT_STATUS_PAPEROUT,
+					  PARPORT_STATUS_PAPEROUT);
+
+	if (!retval) {
+		/* 2. The mode of the ECP port is changed to PS2 mode 001. */
+		frob_econtrol (port, (7<<5), (ECR_PS2<<5));
+
+		/* 3. The direction bit is changed to 0. At this point, the bus and the ECP port are in the
+		   forward-idle state. */
+		parport_data_forward (port);
+		DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
+			 port->name);
+		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+	} else {
+		DPRINTK (KERN_DEBUG
+			 "%s: ECP direction: failed to switch forward\n",
+			 port->name);
+		port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
+	}
+
+
+	return retval;
+}
+
+/**
+ * wait_ecr - wait for flag(s) to change
+ * @port: the port to use
+ * @which: which bits to check
+ * @state: state to wait for
+ * @timeout: how long to wait, in jiffies
+ *
+ * Will busy wait for at least 1000 microseconds before sleeping.
+ */
+static int wait_ecr(struct parport *port, unsigned char which, unsigned char state, unsigned long timeout)
+{
+	unsigned char st;
+	unsigned long deadline;
+	unsigned long ctr;//timeout for busy waiting
+	deadline=jiffies+timeout;
+	st=inb (ECONTROL (port));
+	st&=which;
+
+	ctr=0;
+	while(ctr<1000)
+	{
+		if(st==state)
+			return 0;
+		udelay(1);
+		ctr++;
+		st=inb (ECONTROL (port));
+		st&=which;
+	}
+
+	while(st!=state)
+	{
+		DPRINTK( KERN_DEBUG "waiting\n");
+		if(time_after (jiffies, deadline))
+			return 1;
+		schedule_timeout_uninterruptible(1);
+		st=inb (ECONTROL (port));
+		st&=which;
+	}
+	return 0;
+}
+
+
+/**
+ * set_dir - changes direction appropriately
+ * @port: the port to work on
+ * @te: the transaction to set up the direction for
+ * Context: !in_interrupt()
+ *
+ * Reverses the link direction according to the action code of the te.
+ * May sleep.
+ * Returns -EIO on error (peripheral timeout), 0 on success.
+ */
+static int set_dir(struct parport *port,struct parport_aio_te *te)
+{
+//	struct parport_pc_private *priv = port->physport->private_data;
+	if(te->flags&PARPORT_AIO_FLG_NOSETUP)
+		return 0;
+
+
+	if((inb (ECONTROL (port))&(7<<5))!=(ECR_ECP<<5))//to be certain the state machine starts
+	{
+		parport_write_control(port,parport_read_control(port)&~(0x0B));
+	}
+
+	if(te->action==PARPORT_AIO_ACT_ECP_W_D)
+	{
+		DPRINTK (KERN_DEBUG " te->action==PARPORT_AIO_ACT_ECP_W_D\n");
+		if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE)
+		{//we don't try to drain fifo here.
+			DPRINTK (KERN_DEBUG " ecp_reverse_to_forward (port)\n");
+			if (ecp_reverse_to_forward (port))
+			{
+				DPRINTK (KERN_DEBUG " failed\n");
+				return -EIO;
+			}
+		}
+		port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+	}
+	else
+	{
+		DPRINTK (KERN_DEBUG " te->action!=PARPORT_AIO_ACT_ECP_W_D\n");
+		if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE)
+		{
+			unsigned char ectr;
+			DPRINTK (KERN_DEBUG " ecp_forward_to_reverse (port)\n");
+			ectr = inb (ECONTROL (port));
+			if((port->ieee1284.phase == IEEE1284_PH_FWD_IDLE)&&((ectr&(7<<5))==(ECR_ECP<<5)))
+			{//we just wrote in ECP mode. wait some time for the fifo to go empty before reversing direction. If it does not work, give a warning but continue.
+				const unsigned long FLUSH_DELAY=4*HZ/100;
+				const unsigned char FIFO_EMPTY=0x01;
+				if(wait_ecr(port,FIFO_EMPTY,FIFO_EMPTY,FLUSH_DELAY))
+					printk(KERN_WARNING "Couldn't flush fifo post-write in %lu jiffies. Continuing anyway.\n", (unsigned long)FLUSH_DELAY);
+			}
+			parport_wait_peripheral (port,
+						 PARPORT_STATUS_BUSY,
+						 PARPORT_STATUS_BUSY);
+
+			frob_econtrol (port, (7<<5), (ECR_PS2<<5));
+			if (ecp_forward_to_reverse (port))
+			{
+				DPRINTK (KERN_DEBUG " failed\n");
+				return -EIO;
+			}
+		}
+		port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+	}
+	return 0;
+}
+
+/**
+ * parport_aio_done - called from interrupt context on dma block completion.
+ * @port: the port to work on
+ *
+ */
+void parport_aio_done(struct parport *port)
+{
+	unsigned long flags;
+	unsigned long dmaflag,count,residue;
+	struct parport_aio_te *te;
+	struct list_head * l;
+	struct parport_pc_private *priv = port->physport->private_data;
+	if(!(inb (ECONTROL (port)) & (1<<2)))
+	{
+		return;
+	}
+
+	spin_lock_irqsave(&(priv->aio_lock),flags);
+	if(!priv->dma_active)
+	{
+		DPRINTK (KERN_DEBUG " dma_not_active\n");
+		spin_unlock_irqrestore(&(priv->aio_lock),flags);
+		return;
+	}
+
+	outb (0x74, ECONTROL (port));
+
+	if(!del_timer(&priv->aio_action_timeout))//if we are too late
+	{
+		spin_unlock_irqrestore(&(priv->aio_lock),flags);
+		return;
+	}
+	DPRINTK (KERN_DEBUG " parport_aio_done Ok\n");
+	l=(struct list_head *)&(priv->aio_action_list);
+	if(list_empty(l))
+	{
+		DPRINTK (KERN_DEBUG " list empty\n");
+		spin_unlock_irqrestore(&(priv->aio_lock),flags);
+		return;
+	}
+
+	DPRINTK (KERN_DEBUG " first entry\n");
+	te=list_first_entry(l,struct parport_aio_te,aio_action_list);
+
+	dmaflag = claim_dma_lock();
+
+	DPRINTK (KERN_DEBUG " disable\n");
+	disable_dma(port->dma);
+	residue = get_dma_residue(port->dma);
+	DPRINTK (KERN_DEBUG " residue: %lu\n",residue);
+
+	release_dma_lock(dmaflag);
+	count=priv->dma_blocksize-residue;
+
+	if((priv->size_done+count)<te->size_to_transfer)//another one
+	{
+		DPRINTK (KERN_DEBUG " end dma\n");
+		parport_end_dma(port,te);
+		DPRINTK (KERN_DEBUG " another one\n");
+		parport_start_dma(port,te);
+	}
+	else//next te
+	{
+		DPRINTK (KERN_DEBUG " schedule_work\n");
+		schedule_work(&priv->aio_softirq);
+	}
+	spin_unlock_irqrestore(&(priv->aio_lock),flags);
+}
+
+EXPORT_SYMBOL (parport_aio_done);
+
+/**
+ * parport_aio_soft - task for handling notifications and dma init
+ * @work: work item
+ */
+void parport_aio_soft(struct work_struct * work)
+{
+	struct parport_pc_private *priv = container_of(work,struct parport_pc_private,aio_softirq);
+	struct parport *port=priv->port;
+//	unsigned long flags;
+	unsigned long ret;
+	unsigned char ectr;
+	struct parport_aio_te *te;
+	struct list_head * l;
+	mutex_lock(&(priv->aio_mutex));
+	DPRINTK (KERN_DEBUG " aio_soft\n");
+	l=&(priv->aio_action_list);
+	DPRINTK (KERN_DEBUG " next_te\n");
+	te=list_first_entry(l,struct parport_aio_te,aio_action_list);
+	DPRINTK (KERN_DEBUG " list_del\n");
+	list_del(l->next);
+
+	DPRINTK (KERN_DEBUG " end dma\n");
+	parport_end_dma(port,te);
+	DPRINTK (KERN_DEBUG " finish\n");
+	parport_finish_dma(port,te);
+	priv->dma_active=0;
+
+	if(te->notify)
+	{
+		DPRINTK (KERN_DEBUG " notify\n");
+		mutex_unlock(&(priv->aio_mutex));
+		te->notify(te->res_ptr,te->buf,te->size_done);
+		mutex_lock(&(priv->aio_mutex));
+	}
+	else if(te->res_ptr)
+	{
+		DPRINTK (KERN_DEBUG " te->res_ptr\n");
+		*(te->res_ptr)=te->size_done;
+	}
+	DPRINTK (KERN_DEBUG " kfree\n");
+	kfree(te);
+
+
+	l=&(priv->aio_action_list);
+	if(list_empty(l))
+	{
+		DPRINTK (KERN_DEBUG " list_empty, active=0\n");
+		frob_econtrol (port, (7<<5), (ECR_PS2<<5));
+		priv->dma_active=0;
+	}
+	else if(priv->dma_active==0)//if notify called submit_...
+	{
+		DPRINTK (KERN_DEBUG " next entry\n");
+		//start next
+		do{
+			te=list_first_entry(l,struct parport_aio_te,aio_action_list);
+			DPRINTK (KERN_DEBUG " init_dma\n");
+			if((ret=(set_dir(port,te)||parport_init_dma(port,te))))//assign!
+			{
+				DPRINTK (KERN_DEBUG " error, remove\n");
+				list_del(l->next);
+				if(te->notify)
+				{
+					DPRINTK (KERN_DEBUG " notify\n");
+					mutex_unlock(&(priv->aio_mutex));
+					te->notify(te->res_ptr,te->buf,0);
+					mutex_lock(&(priv->aio_mutex));
+				}
+				else if(te->res_ptr)
+				{
+					DPRINTK (KERN_DEBUG " te->res_ptr\n");
+					*(te->res_ptr)=te->size_done;
+				}
+				DPRINTK (KERN_DEBUG " kfree\n");
+				kfree(te);
+			}
+		}while((priv->dma_active==0)&&(ret!=0)&&(!list_empty(l)));
+
+		if((ret==0)&&(priv->dma_active==0))
+		{
+			DPRINTK (KERN_DEBUG " start_dma\n");
+			priv->dma_active=1;
+			parport_start_dma(port,te);
+		}
+
+		if(ret)
+		{
+			ectr = inb (ECONTROL (port));
+			if((port->ieee1284.phase == IEEE1284_PH_FWD_IDLE)&&((ectr&(7<<5))==(ECR_ECP<<5)))
+			{//we just wrote in ECP mode. wait some time for the fifo to go empty before reversing direction. If it does not work, give a warning but continue.
+				const unsigned long FLUSH_DELAY=4*HZ/100;
+				const unsigned char FIFO_EMPTY=0x01;
+				if(wait_ecr(port,FIFO_EMPTY,FIFO_EMPTY,FLUSH_DELAY))
+					printk(KERN_WARNING "Couldn't flush FIFO in %lu jiffies. Continuing anyway.\n", (unsigned long)FLUSH_DELAY);
+			}
+			frob_econtrol (port, (7<<5), (ECR_PS2<<5));
+		}
+	}
+	mutex_unlock(&(priv->aio_mutex));
+}
+
+/*disable dma
+*returns 1 if not all has been transmitted
+*/
+static int parport_end_dma(struct parport *port,struct parport_aio_te *te)
+{
+	unsigned long dmaflag;
+	unsigned long count,residue;
+	struct parport_pc_private *priv = port->physport->private_data;
+	DPRINTK (KERN_DEBUG " parport_end_dma Ok\n");
+	dmaflag = claim_dma_lock();
+
+	DPRINTK (KERN_DEBUG " disable\n");
+	disable_dma(port->dma);
+	residue = get_dma_residue(port->dma);
+	DPRINTK (KERN_DEBUG " residue: %lu\n",residue);
+
+	release_dma_lock(dmaflag);
+	count=priv->dma_blocksize-residue;
+	DPRINTK (KERN_DEBUG " count: %lu\n",count);
+	priv->dma_aio_addr+=count;//increment address
+	priv->size_done+=count;
+	DPRINTK (KERN_DEBUG " done: %i\n",priv->size_done);
+	if(residue)
+		return 1;
+	return 0;
+}
+
+/*
+dma transfer has timed out.
+*/
+void parport_aio_timeout(unsigned long p)
+{
+	struct list_head * l;
+	unsigned long flags;
+	struct parport *port=(struct parport *)p;
+	struct parport_pc_private *priv = port->physport->private_data;
+
+	outb (0x74, ECONTROL (port));
+
+	DPRINTK (KERN_DEBUG " aio_timeout\n");
+	spin_lock_irqsave(&(priv->aio_lock),flags);
+	if(!priv->dma_active)
+	{
+		DPRINTK (KERN_DEBUG " dma_not_active\n");
+		spin_unlock_irqrestore(&(priv->aio_lock),flags);
+		return;
+	}
+	l=(struct list_head *)&(priv->aio_action_list);
+	if(list_empty(l))
+	{
+		DPRINTK (KERN_DEBUG " list empty\n");
+		spin_unlock_irqrestore(&(priv->aio_lock),flags);
+		return;
+	}
+
+	DPRINTK (KERN_DEBUG " schedule_work\n");
+	schedule_work(&priv->aio_softirq);
+	spin_unlock_irqrestore(&(priv->aio_lock),flags);
+
+}
+
+
+/*finish dma
+*/
+static int parport_finish_dma(struct parport *port,struct parport_aio_te *te)
+{
+	struct device *dev = port->physport->dev;
+	struct parport_pc_private *priv = port->physport->private_data;
+	DPRINTK (KERN_DEBUG " parport_finish_dma Ok\n");
+	if (priv->dma_aio_handle) {
+		if(te->action==PARPORT_AIO_ACT_ECP_W_D)
+		{
+			DPRINTK (KERN_DEBUG " unmap, DMA_TO_DEVICE\n");
+			dma_unmap_single(dev, priv->dma_aio_handle, te->size_to_transfer, DMA_TO_DEVICE);
+		}
+		else
+		{
+			DPRINTK (KERN_DEBUG " unmap, DMA_FROM_DEVICE\n");
+			dma_unmap_single(dev, priv->dma_aio_handle, te->size_to_transfer, DMA_FROM_DEVICE);
+		}
+	}
+	DPRINTK (KERN_DEBUG " add completed request\n");
+	te->size_done=priv->size_done;
+	return 0;
+}
+
+
+/* I/O to memory, no autoinit, increment, demand mode */
+#define DMA_MODE_READ_DM	0x04
+/* memory to I/O, no autoinit, increment, demand mode */
+#define DMA_MODE_WRITE_DM	0x08
+
+/*start a dma transfer
+*/
+static int parport_start_dma(struct parport *port,struct parport_aio_te *te)
+{
+	unsigned long dmaflag;
+	size_t count;
+	struct parport_pc_private *priv = port->physport->private_data;
+	size_t left = te->size_to_transfer-priv->size_done;
+	size_t maxlen = 0x10000; /* max 64k per DMA transfer */
+	unsigned long start = (unsigned long) te->buf + priv->size_done;
+	unsigned long end = (unsigned long) te->buf + te->size_to_transfer - 1;
+
+	DPRINTK (KERN_DEBUG " parport_start_dma Ok\n");
+
+	count = left;
+	if ((start ^ end) & ~0xffffUL)
+	{
+		maxlen = 0x10000 - (start & 0xffff);
+		DPRINTK (KERN_DEBUG " buffer crosses 64k boundary, maxlen:%i\n",maxlen);
+	}
+	if (count > maxlen)
+	{
+		DPRINTK (KERN_DEBUG " count > maxlen ... count=maxlen\n");
+		count = maxlen;
+	}
+	priv->dma_blocksize = count;
+
+	dump_parport_state ("Before",port);
+
+	dmaflag = claim_dma_lock();
+	DPRINTK (KERN_DEBUG " disable_dma\n");
+	disable_dma(port->dma);
+	DPRINTK (KERN_DEBUG " clear_dma\n");
+	clear_dma_ff(port->dma);
+	if(te->action==PARPORT_AIO_ACT_ECP_W_D)
+	{
+		DPRINTK (KERN_DEBUG " set_dma_mode (write)\n");
+		set_dma_mode(port->dma, DMA_MODE_WRITE);
+	}
+	else
+	{
+		DPRINTK (KERN_DEBUG " set_dma_mode (read)\n");
+		set_dma_mode(port->dma, DMA_MODE_READ);
+	}
+	DPRINTK (KERN_DEBUG " set_dma_addr: %i, %x\n",port->dma, priv->dma_aio_addr);
+	set_dma_addr(port->dma, priv->dma_aio_addr);
+	DPRINTK (KERN_DEBUG " set_dma_count: %i\n", count);
+	set_dma_count(port->dma, count);
+
+	/* set ECP mode, disable(set) serviceIntr, disable dma, disable(set) err intr*/
+	DPRINTK (KERN_DEBUG " frob_econtrol....\n");
+	frob_econtrol (port, (7<<5)|(1<<3)|(1<<2)|(1<<4),(ECR_ECP<<5)|(1<<2)|(1<<4));
+
+	DPRINTK (KERN_DEBUG " frob_econtrol\n");
+	/* Set DMA mode */
+	frob_econtrol (port, 1<<3, 1<<3);
+
+	/* Clear serviceIntr */
+	frob_econtrol (port, 1<<2, 0);
+
+
+	DPRINTK (KERN_DEBUG " enable\n");
+	enable_dma(port->dma);
+	release_dma_lock(dmaflag);
+
+/*	DPRINTK (KERN_DEBUG " autofd dwn\n");
+	parport_frob_control (port,
+			      PARPORT_CONTROL_AUTOFD,
+			      0);*/
+
+	dump_parport_state ("After",port);
+
+	DPRINTK (KERN_DEBUG " init_timer\n");
+	init_timer(&priv->aio_action_timeout);
+	priv->aio_action_timeout.expires=jiffies+(PARPORT_INACTIVITY_O_NONBLOCK*count/10)+10*HZ/100;
+	DPRINTK (KERN_DEBUG " expires:%lu\n",priv->aio_action_timeout.expires);
+	priv->aio_action_timeout.data=(unsigned long)port;
+	priv->aio_action_timeout.function=parport_aio_timeout;
+	add_timer(&priv->aio_action_timeout);
+	return 0;
+}
+
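parport_start_dma() above clamps each programmed block so that it never crosses a 64 KiB physical boundary, which ISA-style DMA cannot do. The following is not part of the patch, just a standalone illustration of that arithmetic, using a hypothetical helper name and a worked example.

/*
 * Not part of the patch: standalone illustration of the 64 KiB clamp
 * performed by parport_start_dma(). Each DMA block stops at the next
 * 64 KiB boundary; the remainder is programmed as a further block.
 */
#include <stddef.h>

static size_t dma_block_len(unsigned long start, size_t left)
{
	size_t maxlen = 0x10000;			/* at most 64 KiB per block */
	unsigned long end = start + left - 1;

	if ((start ^ end) & ~0xffffUL)			/* crosses a 64 KiB boundary? */
		maxlen = 0x10000 - (start & 0xffff);	/* stop at the boundary */
	return left < maxlen ? left : maxlen;
}

/*
 * Example: start = 0x1f000, left = 0x3000. end = 0x21fff lies past the
 * 0x20000 boundary, so the first block is 0x1000 bytes; the remaining
 * 0x2000 bytes are programmed as a second block starting at 0x20000.
 */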
+//dma transfer one-time (per te) initialisation
+static int parport_init_dma(struct parport *port,struct parport_aio_te *te)
+{
+	struct parport_pc_private *priv = port->physport->private_data;
+	struct device *dev = port->physport->dev;
+	size_t count;
+	size_t left = te->size_to_transfer;
+	unsigned long end = (unsigned long) te->buf + te->size_to_transfer - 1;
+
+	DPRINTK (KERN_DEBUG " parport_init_dma Ok\n");
+	priv->size_done=0;
+	dump_parport_state (" Before Init",port);
+
+	/* We don't want to be interrupted every ack. */
+	DPRINTK (KERN_DEBUG " parport_pc_disable_irq (port)\n");
+	parport_pc_disable_irq (port);
+	/* set ECP mode, disable(set) serviceIntr, disable dma, disable(set) err intr*/
+	DPRINTK (KERN_DEBUG " frob_econtrol....\n");
+//	frob_econtrol (port, (7<<5)|(1<<3)|(1<<2)|(1<<4),(ECR_PS2<<5)|(1<<2)|(1<<4));
+//	frob_econtrol (port, (7<<5)|(1<<3)|(1<<2)|(1<<4),(ECR_ECP<<5)|(1<<2)|(1<<4));
+
+	//set 1284 active
+	//do this before
+//	parport_frob_control (port,
+//			      PARPORT_CONTROL_SELECT,
+//			      0);
+
+	count=left;
+	if (end < MAX_DMA_ADDRESS) {
+		if(te->action==PARPORT_AIO_ACT_ECP_W_D)
+		{
+			DPRINTK (KERN_DEBUG " dma_map... DMA_TO_DEVICE\n");
+			priv->dma_aio_addr = priv->dma_aio_handle = dma_map_single(dev, (void *)te->buf, te->size_to_transfer, DMA_TO_DEVICE);
+		}
+		else
+		{
+			DPRINTK (KERN_DEBUG " dma_map... DMA_FROM_DEVICE\n");
+			priv->dma_aio_addr = priv->dma_aio_handle = dma_map_single(dev, (void *)te->buf, te->size_to_transfer, DMA_FROM_DEVICE);
+		}
+		if(dma_mapping_error(dev,priv->dma_aio_handle))
+		{
+			DPRINTK (KERN_DEBUG " failed\n");
+			return -EFAULT;
+		}
+	} else {
+		DPRINTK (KERN_DEBUG " error: DMA Buffer not valid\n");
+		return -EFAULT;
+	}
+	DPRINTK (KERN_DEBUG " first_byte:0x%02x\n",te->buf[0]);
+//	schedule_delayed_work(&priv->aio_action_timeout,(PARPORT_INACTIVITY_O_NONBLOCK*te->size_to_transfer/10)+1);
+	return 0;
+}
+
+/**
+ * parport_submit_transaction - submit a buffer for async read or write
+ * @port: the port to write or read
+ * @buf: must be a pointer to a buffer suitable for DMA transfer.
+ * @len: the length of the buffer
+ * @flags: PARPORT_AIO_FLG_NOSETUP won't set up direction or signals before starting the transfer.
+ *	That has to be done manually. May be necessary for unusual setups (host2host). Use with caution.
+ *	The lowest 4 bits of DCR must be 0000 or 0100 for the ECP state machine to start.
+ *	With PARPORT_AIO_FLG_NOSETUP this will _not_ be set up on the first transfer (PS2 -> ECP) and has to be done before.
+ * @action_code: may be PARPORT_AIO_ACT_ECP_W_D or PARPORT_AIO_ACT_ECP_R_D
+ * @complete: if not NULL and if notify is NULL this should point to an integer.
+ *	It will be written to -1 by this function, and after the transfer completes
+ *	it will hold the number of bytes successfully transferred. Can be used for polling: while(*complete==-1)...
+ *	If notify is not NULL, complete will _not_ be written to but acts as user data for the notify function.
+ * @notify: a notification callback. It is called from a work queue (process context). A driver may use it to trigger actions on transfer completion.
+ *	The arguments to notify() are the complete pointer, the buffer address and the length of the data successfully transferred.
+ *
+ * Multiple transfers may be submitted without waiting for completion of previous ones. They will be started (and finished) in the order they were submitted.
+ *
+ * parport_submit_transaction() returns -EINVAL if the action code is invalid,
+ * -ENOMEM if the transaction could not be allocated, -EIO if there was a problem setting up the initial link direction,
+ * and 0 on successful queueing of the request. Notification will occur in the last case _only_.
+ */
+int parport_submit_transaction (struct parport *port, const void *buf,
+			size_t len, int flags, int action_code, int * complete,void (*notify)(int *data,void * buf,int len_real))
+{
+	unsigned long iflags;
+	int ret;
+	struct parport_pc_private *priv = port->physport->private_data;
+	struct parport_aio_te * te=NULL;
+	struct list_head * l=NULL;
+	if((action_code<0)||(action_code>=PARPORT_AIO_ACT_BAD))
+		return -EINVAL;
+	//ready
+	te = kmalloc(sizeof(struct parport_aio_te), GFP_KERNEL);
+	if(!te)
+		return -ENOMEM;
+	DPRINTK (KERN_DEBUG " parport_submit_transaction Ok\n");
+	te->buf=(char *)buf;
+	te->size_to_transfer=len;
+	te->size_done=0;
+	te->action=action_code;
+	te->flags=flags;
+	te->res_ptr=complete;
+	te->notify=notify;// PARPORT_INIT_TE(te)
+	if((!te->notify)&&(te->res_ptr))
+		*(te->res_ptr)=-1;
+
+	mutex_lock(&(priv->aio_mutex));
+	spin_lock_irqsave(&(priv->aio_lock),iflags);
+//	act=priv->dma_active;
+//	spin_unlock_irqrestore(&(priv->aio_lock),iflags);
+
+	//spin_lock_irqsave(&(priv->aio_lock),iflags);
+	//set
+	DPRINTK (KERN_DEBUG " list_add_tail\n");
+	list_add_tail(&te->aio_action_list, &priv->aio_action_list);
+	//go
+	if(priv->dma_active) {
+		DPRINTK (KERN_DEBUG " dma already active\n");
+		spin_unlock_irqrestore(&(priv->aio_lock),iflags);
+		mutex_unlock(&(priv->aio_mutex));
+		return 0;
+	}
+	else
+	{
+		//ok there is no dma going on so we are the only ones accessing this data... or not?
+		spin_unlock_irqrestore(&(priv->aio_lock),iflags);
+		l=&(priv->aio_action_list);
+		if(list_empty(l)) {
+			DPRINTK (KERN_DEBUG " list empty\n");
+			mutex_unlock(&(priv->aio_mutex));
+			return 0;
+		}
+		DPRINTK (KERN_DEBUG " get first entry\n");
+		te=list_first_entry(l,struct parport_aio_te,aio_action_list);
+
+		//we need to be able to sleep here (for set_dir())
+		if((ret=(set_dir(port,te)||parport_init_dma(port,te))))//assign!
+		{
+			//remove
+			list_del(l->next);
+			frob_econtrol (port, (7<<5), (ECR_PS2<<5));
+			DPRINTK (KERN_DEBUG " error, remove\n");
+			priv->dma_active=0;
+			DPRINTK (KERN_DEBUG " kfree\n");
+			kfree(te);
+			mutex_unlock(&(priv->aio_mutex));
+			return ret;
+		}
+		spin_lock_irqsave(&(priv->aio_lock),iflags);
+		priv->dma_active=1;
+		DPRINTK (KERN_DEBUG " dma_active=1\n");
+		parport_start_dma(port,te);
+		spin_unlock_irqrestore(&(priv->aio_lock),iflags);
+	}
+
+	mutex_unlock(&(priv->aio_mutex));
+	return 0;
+}
+
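Not part of the patch: a sketch of the callback completion style described in the kerneldoc above, written as a hypothetical client driver. The names my_write_done() and my_send() are illustrative only; the buffer is allocated with GFP_DMA to mirror what the kerneldoc requires of @buf.

/*
 * Not part of the patch: hypothetical client using the notify-callback
 * completion style of parport_submit_transaction(). The port is assumed
 * to be claimed already.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/parport.h>

static void my_write_done(int *data, void *buf, int len_real)
{
	/* runs in process context (work queue); free the DMA-able buffer */
	pr_info("parport aio write finished: %d bytes\n", len_real);
	kfree(buf);
}

static int my_send(struct parport *port, const void *src, size_t len)
{
	void *buf;
	int ret;

	if (!port->ops->submit_transaction)
		return -ENOSYS;	/* lowlevel driver has no async support */

	buf = kmalloc(len, GFP_DMA);	/* buffer must be suitable for DMA */
	if (!buf)
		return -ENOMEM;
	memcpy(buf, src, len);

	ret = port->ops->submit_transaction(port, buf, len, 0,
					    PARPORT_AIO_ACT_ECP_W_D,
					    NULL, my_write_done);
	if (ret)
		kfree(buf);	/* no notification when queueing fails */
	return ret;
}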
+/**
+ * parport_cancel_transaction - remove the first queued te with a matching buf pointer. Will not notify of cancelled transfers.
+ * @port: port to work on
+ * @buf: the pointer to look for (they'd better be unique)
+ *
+ * Returns 1 if an inactive queued transfer was removed.
+ * Returns 0 if nothing was removed: either it was not found or it had already started.
+ */
+int parport_cancel_transaction (struct parport *port, const void *buf)
+{
+	unsigned long flags;
+	struct parport_pc_private *priv = port->physport->private_data;
+	struct parport_aio_te * te;
+	struct list_head * l;
+	mutex_lock(&(priv->aio_mutex));
+	spin_lock_irqsave(&(priv->aio_lock),flags);
+	list_for_each(l, &(priv->aio_action_list))
+	{
+		te=list_entry(l,struct parport_aio_te,aio_action_list);
+		if(te->buf==buf)
+		{
+			if(l==priv->aio_action_list.next)//already active
+			{
+				spin_unlock_irqrestore(&(priv->aio_lock),flags);
+				mutex_unlock(&(priv->aio_mutex));
+				return 0;
+			}
+			else
+			{
+				//remove
+				list_del(l);
+				kfree(te);
+				spin_unlock_irqrestore(&(priv->aio_lock),flags);
+				mutex_unlock(&(priv->aio_mutex));
+				return 1;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&(priv->aio_lock),flags);
+	mutex_unlock(&(priv->aio_mutex));
+	return 0;
+}
+
+
 #endif /* IEEE 1284 support */
 #endif /* Allowed to use FIFO/DMA */
 
@@ -1986,6 +2757,14 @@ static int parport_dma_probe(struct parp
 	return p->dma;
 }
 
+irqreturn_t parport_pc_irq_handler(int irq, void *dev_id)
+{
+	struct parport *port = dev_id;
+	parport_aio_done(port);
+	return parport_irq_handler(irq,dev_id);
+}
+
+
 /* --- Initialisation code -------------------------------- */
 
 static LIST_HEAD(ports_list);
@@ -2046,6 +2825,17 @@ struct parport *parport_pc_probe_port(un
 	INIT_LIST_HEAD(&priv->list);
 	priv->port = p;
 
+	priv->dma_active=\
+	priv->dma_aio_addr=\
+	priv->dma_aio_handle=\
+	priv->dma_blocksize=\
+	priv->size_done=0;
+
+	INIT_LIST_HEAD(&priv->aio_action_list);
+	spin_lock_init(&priv->aio_lock);
+	mutex_init(&priv->aio_mutex);
+	INIT_WORK(&priv->aio_softirq,parport_aio_soft);
+
 	p->dev = dev;
 	p->base_hi = base_hi;
 	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
@@ -2157,7 +2947,7 @@ struct parport *parport_pc_probe_port(un
 		EPP_res = NULL;
 	}
 	if (p->irq != PARPORT_IRQ_NONE) {
-		if (request_irq(p->irq, parport_irq_handler,
+		if (request_irq(p->irq, parport_pc_irq_handler,
 				irqflags, p->name, p)) {
 			printk(KERN_WARNING "%s: irq %d in use, "
 				"resorting to polled operation\n",
@@ -2187,6 +2977,10 @@ struct parport *parport_pc_probe_port(un
 				       p->name);
 				free_dma(p->dma);
 				p->dma = PARPORT_DMA_NONE;
+			} else {
+				printk(KERN_INFO "async ops available\n");
+				p->ops->submit_transaction=parport_submit_transaction;
+				p->ops->cancel_transaction=parport_cancel_transaction;
 			}
 		}
 	}
diff -uprN prev/drivers/parport/share.c new/drivers/parport/share.c
--- prev/drivers/parport/share.c	2012-10-14 23:41:04.000000000 +0200
+++ new/drivers/parport/share.c	2012-10-16 16:14:36.000000000 +0200
@@ -64,6 +64,12 @@ static size_t dead_write (struct parport
 { return 0; }
 static size_t dead_read (struct parport *p, void *b, size_t l, int f)
 { return 0; }
+int dead_submit(struct parport *port, const void *buf,
+	size_t len, int flags, int action_code, int * complete,void (*notify)(int *data,void * buf,int len_real))
+	{ return -ENODEV;}
+int dead_cancel(struct parport *port, const void *buf)
+	{ return -ENODEV;}
+
 static struct parport_operations dead_ops = {
 	.write_data	= dead_write_lines,	/* data */
 	.read_data	= dead_read_lines,
@@ -93,6 +99,9 @@ static struct parport_operations dead_op
 	.ecp_read_data	= dead_read,
 	.ecp_write_addr	= dead_write,
 
+	.submit_transaction=dead_submit,/* async */
+	.cancel_transaction=dead_cancel,
+
 	.compat_write_data	= dead_write,	/* compat */
 	.nibble_read_data	= dead_read,	/* nibble */
 	.byte_read_data		= dead_read,	/* byte */
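Not part of the patch: a sketch of the two other usage patterns documented above, polling the @complete integer and cancelling a transfer that has not started yet. Names are illustrative only, and real code would want READ_ONCE()/proper synchronisation around the polled flag rather than a plain read.

/*
 * Not part of the patch: hypothetical client using the polling completion
 * style of parport_submit_transaction() plus parport_cancel_transaction().
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/parport.h>

static ssize_t my_poll_read(struct parport *port, size_t len)
{
	int done;			/* the @complete integer: -1 while in flight */
	unsigned long timeout = jiffies + HZ;
	void *buf;
	int ret;

	if (!port->ops->submit_transaction)
		return -ENOSYS;
	buf = kmalloc(len, GFP_DMA);
	if (!buf)
		return -ENOMEM;

	ret = port->ops->submit_transaction(port, buf, len, 0,
					    PARPORT_AIO_ACT_ECP_R_D,
					    &done, NULL);
	if (ret) {
		kfree(buf);
		return ret;
	}

	while (done == -1) {		/* poll for completion */
		if (time_after(jiffies, timeout) &&
		    port->ops->cancel_transaction(port, buf)) {
			/* still queued, never started: safe to give up */
			kfree(buf);
			return -ETIMEDOUT;
		}
		msleep(10);
	}

	ret = done;			/* number of bytes actually transferred */
	/* ... consume buf ... */
	kfree(buf);
	return ret;
}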
diff -uprN prev/include/linux/parport.h new/include/linux/parport.h
--- prev/include/linux/parport.h	2012-10-14 23:41:04.000000000 +0200
+++ new/include/linux/parport.h	2012-10-16 16:22:25.000000000 +0200
@@ -103,6 +103,19 @@ struct parport_operations {
 	size_t (*ecp_write_addr) (struct parport *port, const void *buf,
 				  size_t len, int flags);
 
+/* action values describe what to do */
+#define PARPORT_AIO_ACT_ECP_W_D 0
+#define PARPORT_AIO_ACT_ECP_R_D 1
+#define PARPORT_AIO_ACT_BAD 2
+
+/* flags */
+#define PARPORT_AIO_FLG_NOSETUP (1<<4)
+
+	int (*submit_transaction) (struct parport *port, const void *buf,
+			size_t len, int flags, int action_code, int * complete,void (*notify)(int *data,void * buf,int len_real));
+
+	int (*cancel_transaction) (struct parport *port, const void *buf);
+
 	size_t (*compat_write_data) (struct parport *port, const void *buf,
 				     size_t len, int flags);
 	size_t (*nibble_read_data) (struct parport *port, void *buf,
diff -uprN prev/include/linux/parport_pc.h new/include/linux/parport_pc.h
--- prev/include/linux/parport_pc.h	2012-10-14 23:41:04.000000000 +0200
+++ new/include/linux/parport_pc.h	2012-10-16 15:14:05.000000000 +0200
@@ -2,6 +2,7 @@
 #define __LINUX_PARPORT_PC_H
 
 #include <asm/io.h>
+#include
 
 /* --- register definitions ------------------------------- */
 
@@ -14,6 +15,24 @@
 #define CONTROL(p)  ((p)->base    + 0x2)
 #define STATUS(p)   ((p)->base    + 0x1)
 #define DATA(p)     ((p)->base    + 0x0)
+/** parport asynchronous transaction entry - they are organised as a first-in first-out queue
+ */
+struct parport_aio_te{
+	/* links us to the list */
+	struct list_head aio_action_list;
+	/* the buffer pointer */
+	char * buf;
+	/* how much is left, how much is done. We memorize the latter to return in the notification */
+	size_t size_to_transfer,size_done;
+	/* what to do, see parport.h */
+	int action;
+	/* flags, ignored */
+	int flags;
+	/* user data or notification var, may be NULL */
+	int * res_ptr;
+	/* notification callback, may be NULL */
+	void (*notify)(int *data,void * buf,int len_real);
+};
 
 struct parport_pc_private {
 	/* Contents of CTR. */
@@ -40,6 +59,26 @@ struct parport_pc_private {
 	dma_addr_t dma_handle;
 	struct list_head list;
 	struct parport *port;
+
+	/* whether dma is currently going on */
+	int dma_active;
+	/* size of current block */
+	int dma_blocksize;
+	/* dma address (variable) and handle */
+	dma_addr_t dma_aio_addr, dma_aio_handle;
+	/* accumulated size for current transfer */
+	size_t size_done;
+
+	/* work_struct to notify, start next transfer etc */
+	struct work_struct aio_softirq;
+	/* timeout if something goes wrong */
+	struct timer_list aio_action_timeout;
+	/* list (queue) of all (pending and active) transaction states for this port */
+	struct list_head aio_action_list;
+	/* protects accesses to this structure (especially dma_active) */
+	spinlock_t aio_lock;
+	/* protects against concurrent accesses between work_structs */
+	struct mutex aio_mutex;
 };
 
 struct parport_pc_via_data
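The header changes above make submit_transaction/cancel_transaction optional members of struct parport_operations: a low-level driver that cannot do asynchronous ECP simply leaves them NULL (ppdev then returns -ENOSYS, and dead ports fall back to the -ENODEV stubs added to share.c). Not part of the patch: a sketch, with hypothetical names, of how another low-level driver might wire up the hooks.

/*
 * Not part of the patch: hypothetical low-level driver advertising the new
 * optional hooks. Drivers without async support leave both pointers NULL.
 */
#include <linux/parport.h>

static int my_submit(struct parport *port, const void *buf, size_t len,
		     int flags, int action_code, int *complete,
		     void (*notify)(int *data, void *buf, int len_real))
{
	/* queue the buffer on this controller's own DMA engine ... */
	return 0;
}

static int my_cancel(struct parport *port, const void *buf)
{
	/* drop the matching queued transfer, if it has not started yet */
	return 0;
}

static struct parport_operations my_parport_ops = {
	/* ... the usual SPP/EPP/ECP hooks ... */
	.submit_transaction	= my_submit,
	.cancel_transaction	= my_cancel,
};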