Anonymous
Linux PCI DMA с рассеянным сбором не запускает прерывание
Сообщение
Anonymous » 23 июл 2025, 08:02
Я разрабатываю драйвер PCI Linux для пользовательского устройства PCI, которое включает в себя FPGA и мост PLX. Устройство выполняет передачу DMA в системную память, и мой драйвер отвечает за передачу этих данных в пространство пользователя. Из пользовательского пространства я инициирую команду (SGLIST_CHA), которая запускает драйвер, чтобы выделить 10 буферов по 4096 байт, каждый из которых использует DMA_ALLOC_COHERENT (). Эти буферы являются типичными для пользовательской структуры DMA_LIST (с полями для PADR, LADR, SIZ и DPR), которая содержит информацию DMA SG (Scatter-Gater). Список дескрипторов построен, и физический адрес первого дескриптора записывается в реестр DMADPR0 устройства. Затем, из пространства пользователя, я отправляю буферы ~ 16 КБ (10 раз), которые драйвера прикрепляет с использованием pin_user_pages () и карт с использованием dma_map_sg (). Эти нанесенные на карту записи SG записаны в таблицу дескрипторов, и каждая 4K -передача помечена DMA_READ | Enable_term_int, ожидая прерывания после завершения. Однако, хотя память, по -видимому, правильно написана устройством, я не получаю никаких прерываний. Я использую DMA_SET_MASK_AND_COHERENT (& PDEV-> DEV, DMA_BIT_MASK (32)) из-за аппаратных ограничений с 64-битной адресацией. Я также называю dma_sync_single_for_device () в буфере дескриптора, прежде чем инициировать DMA.
Несмотря на это, в RHEL 9 (64 бит) PCI-устройство не генерирует прерывания, либо DMA не выполняется из-за проблемы доступа оборудования к адресу. Я пытаюсь понять, есть ли здесь неверное использование Linux DMA API, неправильная синхронизация памяти, проблема выравнивания дескрипторов или что-то ещё, что к этому приводит. Буду очень признателен за любую помощь или рекомендации по решению этой проблемы с прерываниями.
Код: Выделить всё
/*
 * DMA_LIST — one scatter/gather descriptor in the in-memory layout the
 * device's DMA engine walks.  The first four 32-bit words (PADR/LADR/
 * SIZ/DPR) follow the PLX block-mode descriptor format; DPR holds the
 * bus address of the next descriptor with control flags in its low bits.
 * NOTE(review): sizeof(DMA_LIST) is 32 bytes while classic PLX
 * descriptors are 16 bytes and must be 16-byte aligned — confirm the
 * FPGA really consumes 32-byte entries, because the next-descriptor
 * links are computed with sizeof(DMA_LIST) elsewhere in this file.
 */
typedef struct {
uint32_t u32PADR; // PCI (host) bus address, low 32 bits
uint32_t u32LADR; // Local Address - Card side
uint32_t u32SIZ; // DMA Transfer size in bytes
uint32_t u32DPR; // Descriptor pointer: next-descriptor bus address | flag bits
uint32_t u32HPADR ; // PCI High address (upper 32 bits of host bus address)
uint32_t dummy2 ; // padding to the 32-byte stride the chain math assumes
uint32_t dummy3;
uint32_t dummy4 ;
//uint32_t u32DPR_high; // PCI High address
} DMA_LIST;
/*
 * dma_desc_t — driver-side shadow of a single DMA segment.
 * NOTE(review): this type is declared but not referenced anywhere in the
 * visible portion of the file — candidate for removal if unused.
 */
typedef struct{
dma_addr_t addr; // bus address of the segment
u32 size; // segment length in bytes
u32 flags; // per-segment control flags
}dma_desc_t;
/*
 * File-scope driver state.  Per-device bookkeeping is indexed by minor
 * number (MAX_DEVICES) and per-descriptor-list state by list slot
 * (MAX_LIST_COUNT).  "A"/"B" suffixes denote the two DMA channels.
 * NOTE(review): none of this state carries a lock, yet it is written from
 * open/read/ioctl process context and read from the ISR — confirm
 * single-user operation or add proper locking.
 */
/* CPU pointers to each channel's descriptor table (one table per list). */
DMA_LIST *pDmaListA[MAX_LIST_COUNT];
DMA_LIST *pDmaListB[MAX_LIST_COUNT];
/* Bus addresses of the first descriptor of each list (one spare slot so
 * code may reference index iListCnt + 1 when chaining). */
dma_addr_t ulStartPtrA[MAX_LIST_COUNT + 1];
dma_addr_t ulStartPtrB[MAX_LIST_COUNT + 1];
dma_addr_t ulHwStartPtrA[MAX_LIST_COUNT + 1];
dma_addr_t ulHwStartPtrB[MAX_LIST_COUNT + 1];
ULONG iListCntA,iListCntB;
/* Per-device CPU virtual address of the current descriptor buffer. */
UCHAR* pchVirtAddr[MAX_DEVICES] = {NULL} ;
/* Number of SG elements recorded for each built list (read by the ISR). */
ULONG ulTotalDmaListA[MAX_DEVICES][MAX_LIST_COUNT];
ULONG ulTotalDmaListB[MAX_DEVICES][MAX_LIST_COUNT];
UCHAR uchIntChannelA[MAX_DEVICES];
UCHAR uchIntChannelB[MAX_DEVICES];
UCHAR uchIntChannelAB[MAX_DEVICES];
/* Open reference count per device; first open registers the IRQ. */
ULONG ulOpen[MAX_DEVICES];
/* NOTE(review): duplicate tentative definition — iListCntA/iListCntB are
 * already declared above.  Legal C (tentative definitions merge) but one
 * of the two lines should be removed. */
ULONG iListCntA,iListCntB;
ULONG ulListCntA[MAX_DEVICES],ulListCntB[MAX_DEVICES];
ULONG ulIntCntA[MAX_DEVICES],ulIntCntB[MAX_DEVICES];
ULONG ulPages_listA[MAX_LIST_COUNT],ulPages_listB[MAX_LIST_COUNT],ulPages_memA[MAX_LIST_COUNT],ulPages_memB[MAX_LIST_COUNT];
/* Coherent descriptor buffers: CPU pointer and matching DMA handle. */
void* dma_coherent_virtA[MAX_LIST_COUNT];
dma_addr_t dma_coherent_handleA[MAX_LIST_COUNT];
void *Base_Add;
void* ITP_Mapped_Addr[3] = {0};
unsigned long long mem_size[3]; // BAR sizes, 64-bit
void *BaseAddress[3];
CONFIG_STRUCT stConfig;
struct cdev Struct_ITP;
/* Shared page used to pass the completed-list id to user space. */
struct page **pBuffId = NULL ;
struct page **pEventA;
struct page **pEventB;
struct page **pages_list_sgA[MAX_LIST_COUNT];
struct page **pages_list_sgB[MAX_LIST_COUNT];
/* Pinned user pages backing each data-buffer SG list. */
struct page **pages_mem_sgA[MAX_LIST_COUNT];
struct page **pages_mem_sgB[MAX_LIST_COUNT];
struct scatterlist *dma_list_sgA[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}};
struct scatterlist *dma_list_sgB[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}};
struct scatterlist *dma_mem_sgA[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}};
struct scatterlist *dma_mem_sgB[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}};
int dma_list_sgcntA[MAX_DEVICES][MAX_LIST_COUNT],dma_list_sgcntB[MAX_DEVICES][MAX_LIST_COUNT];
int dma_mem_sgcntA[MAX_DEVICES][MAX_LIST_COUNT],dma_mem_sgcntB[MAX_DEVICES][MAX_LIST_COUNT];
ULONG ulDmaLength[MAX_DEVICES][2] ;
long lDmaRemndr[MAX_DEVICES][2] ;
/* Close/stop flags per device (0 = acquiring, 1 = stopped). */
int bDevCloseStatusA[MAX_DEVICES] ;
int bDevCloseStatusB[MAX_DEVICES] ;
int iCount;
UCHAR irq[MAX_DEVICES];
/* Channel selector set via ioctl; steers ITP_read / ITPDmaExecutionRoutine. */
UCHAR uchChannel;
PUCHAR chBuffId;
PUCHAR eventA;
PUCHAR eventB;
dev_t devnum;
#define DEBUG 1
/* NOTE(review): identifiers with a leading double underscore are reserved
 * for the implementation — rename __DEBUG__ with a driver prefix. */
#define __DEBUG__ 1
void* PCM_Mapped_Addr[MAX_DEVICES];
void* EEPROM_Mapped_Addr[MAX_DEVICES];
void *Base_Addr[MAX_DEVICES];
pid_t processno;
int ITPflag[MAX_DEVICES];
/* Signal number / PID registered per device+channel for completion delivery. */
ULONG SignalNo[MAX_DEVICES][2];
ULONG Pid[MAX_DEVICES][2],gMinor;
struct pid *pPidVal[MAX_DEVICES][2] = {{NULL}};
int cnt=0;
struct pci_device_id devid[3];
struct pci_device_id *gDevid;
/* struct device pointer per probed card — required for the DMA API calls. */
struct device* gDev[MAX_DEVICES];
int count=0,h;
dma_addr_t simple_region_start,simple_region_size;
/* Forward declarations. */
void ITP_do_tasklet(struct tasklet_struct *unused );
//void ITP_do_tasklet(unsigned long );
DECLARE_TASKLET(ITP_tasklet, ITP_do_tasklet);
//DECLARE_TASKLET(ITP_tasklet, ITP_do_tasklet,1);
irqreturn_t ITP_isr(int ,void *);
void ITP_unallocate(struct device*,int,int);
void ITPDmaExecutionRoutine(struct scatterlist*,int,int);
/* PCI vendor/device IDs this driver binds to; the table is exported via
 * MODULE_DEVICE_TABLE so udev/modprobe can autoload the module. */
static struct pci_device_id ITP[] = {
{ PCI_DEVICE(VENDOR,DEVICE)},
{0, }
};
MODULE_DEVICE_TABLE(pci,ITP);
/*
 * probe — PCI core callback for each matching card: enables the device,
 * configures the DMA mask, and records irq/id/device for later use.
 * NOTE(review): the error from pci_enable_device() is logged but NOT
 * returned, so probe reports success even when the device could not be
 * enabled.
 */
static int probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int j,ret;
unsigned long max,min,ulAddr;
int iBaseCount = 0;
ret = pci_enable_device(dev);
if(ret)
{
printk(" Error in enabling PCI device") ;
}
/* NOTE(review): the next line is garbled by HTML extraction (a '<' and
 * surrounding code were stripped).  Per the accompanying question text it
 * presumably contained a BAR-mapping loop followed by
 * dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)) — recover the
 * original source before building. */
for(j=0;jdev, DMA_BIT_MASK(32)))
{
/* NOTE(review): dma_set_mask() returns 0 on SUCCESS, so this branch
 * (nonzero = failure) printing "Allowed" — and the else printing
 * "Denied" — is inverted.  Also 0x07FFFFF is an unusual ~27-bit mask;
 * confirm against the hardware's real addressing limit. */
if (dma_set_mask (&dev->dev, 0x07FFFFF))
printk("DRIVER:Device No : %d DMA Operation is Allowed\n",count);
else
printk("DRIVER:Device No : %d DMA Operation is Denied\n",count);
}
else
{
printk("Using fallback 32-bit DMA mask\n");
}
/* Record per-device facts used by open()/read(): IRQ line, matched id,
 * and the struct device needed by the DMA-mapping API. */
irq[count] = (UCHAR)dev->irq;
devid[count] = *id;
gDev[count] = &dev->dev;
count++;
return 0;
}
/*
 * remove — PCI unbind callback: tears down every BAR mapping recorded in
 * ITP_Mapped_Addr[] during probe.
 */
void remove(struct pci_dev *dev)
{
	int bar;

#ifdef DEBUG
	printk("Device Remove\n");
#endif
	for (bar = 0; bar < TOTAL_DEVICE_RESOURCE; bar++)
		iounmap(ITP_Mapped_Addr[bar]);
}
/*
 * ITP_open — open one of the per-card character devices.
 *
 * Minor 0 is a control node and opens unconditionally.  For real devices
 * (minor >= 1) the first opener registers the shared IRQ handler and
 * resets the per-device DMA bookkeeping; later opens only bump the
 * reference count in ulOpen[].
 *
 * Returns 0 on success or the negative errno from request_irq().
 */
static int ITP_open(struct inode *inode, struct file *filp)
{
	int minor, status;

	minor = MINOR(filp->f_path.dentry->d_inode->i_rdev);
	if (minor == 0)
		return 0;
	minor = minor - 1;	/* device arrays are indexed from minor 1 */
#ifdef DEBUG
	printk("Device Opened Minor: %d\n",minor);
	printk("Device Opened IRQ: %d\n",irq[minor]);
#endif
	if (ulOpen[minor] == 0) {
		/* IRQF_SHARED: &devid[minor] is the dev_id cookie handed back
		 * to ITP_isr() and needed again for free_irq(). */
		status = request_irq(irq[minor], &ITP_isr, IRQF_SHARED, "PCIITP", &devid[minor]);
		if (status) {
			/* FIX: fail the open instead of carrying on — with no
			 * handler registered, the DMA-completion interrupt the
			 * driver waits for can never be serviced. */
			printk("Error:IRQ Request Failed %d\n",status);
			return status;
		}
		/* FIX: corrected "Succ+eded" typo in the log message. */
		printk("IRQ Request Succeeded %d %d with SZ of DMA_LIST = %lu \n",status,irq[minor],sizeof(DMA_LIST));
		/* First open: reset per-device DMA accounting for both channels. */
		ulDmaLength[minor][0] = 0;
		ulDmaLength[minor][1] = 0;
		lDmaRemndr[minor][0] = 0;
		lDmaRemndr[minor][1] = 0;
		bDevCloseStatusA[minor] = 0;	/* 0 = acquisition allowed */
		bDevCloseStatusB[minor] = 0;
	}
	ulOpen[minor]++;
	cnt = 0;
	return 0;
}
/*
 * ITP_close — release() hook.  Drops the device's open count and, when
 * the last user goes away, flags both channels stopped and quiesces the
 * hardware (abort/clear via DMACSR0, zero both transfer-enable regs).
 */
static int ITP_close(struct inode *inode, struct file *filp)
{
	int dev_idx;

	dev_idx = MINOR(filp->f_path.dentry->d_inode->i_rdev);
	if (dev_idx == 0)
		return 0;
	dev_idx -= 1;
#ifdef DEBUG
	printk("Device Closed Minor: %d\n",dev_idx);
#endif
	ulOpen[dev_idx]--;
	if (ulOpen[dev_idx] != 0)
		return 0;

	/* Last close: stop acquisition on both channels. */
	bDevCloseStatusA[dev_idx] = 1;
	bDevCloseStatusB[dev_idx] = 1;
#ifdef DEBUG
	printk("Stoppting Acquisition: %d\n",dev_idx);
#endif
	if (EEPROM_Mapped_Addr[dev_idx])
		writel(0x1414, (UINT *)EEPROM_Mapped_Addr[dev_idx] + DMACSR0/4);
	if (PCM_Mapped_Addr[dev_idx]) {
		writel(0, (UINT *)PCM_Mapped_Addr[dev_idx] + DMA_TRANS_ENABLE0);
		writel(0, (UINT *)PCM_Mapped_Addr[dev_idx] + DMA_TRANS_ENABLE1);
	}
	/* Buffer/descriptor teardown is not performed here (see original
	 * "Free everything" placeholder). */
	return 0;
}
/*
 * ITP_read — overloaded read() entry point.  Behavior depends on the
 * global uchChannel selector (set via ioctl, not visible here):
 *   SGLIST_CHA: allocate one coherent descriptor buffer of 'count' bytes,
 *               wrap it in a 1-entry scatterlist, map it, and hand it to
 *               ITPDmaExecutionRoutine() to register as a descriptor list.
 *   CHA:        pin the caller's buffer, build a scatterlist over the
 *               pinned pages, map it DMA_FROM_DEVICE, and fill the
 *               descriptor table for the data transfer.
 * Always returns 0; the payload is delivered via DMA + signal, not via
 * the read() return path.
 *
 * NOTE(review): the SGLIST_CHA path maps memory obtained from
 * dma_alloc_coherent() AGAIN with dma_map_sg() (DMA_TO_DEVICE).  The DMA
 * API forbids streaming-mapping coherent memory; the streaming address
 * returned by dma_map_sg() then REPLACES dma_handle as the descriptor
 * base in ITPDmaExecutionRoutine(), so the device may be pointed at a
 * bus address that does not correspond to the descriptor contents — a
 * prime suspect for the "no interrupt / chain never completes" symptom.
 */
ssize_t ITP_read(struct file *filp, char *buff, size_t count, loff_t *offset)
{
int minor,result = 0,i = 0; //force = 1
/* NOTE(review): 'err' is never assigned after this init — every use
 * below sees 0, so the pin-failure path always returns -EINVAL. */
int err = 0;
dma_addr_t NoPages = 0,addr = 0;
dma_addr_t ulLength = 0,uloffset1 = 0,ulTotalLen = 0;
void* vPtr = NULL;
unsigned long first,last,ulOffset,uaddr;
dma_addr_t dma_handle;
minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev);
if(minor == 0)
return 0;
else
minor = minor -1;
#ifdef DEBUG
printk("Device Reading Operation Minor: %d\n",minor);
printk("Memory size to be mapped for id %d %ld\n",uchChannel,(ULONG)count);
#endif
/* Compute page span and intra-page offset of the user buffer. */
addr = (uintptr_t)buff;
uaddr = (unsigned long)buff;
first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((uaddr+count-1) & PAGE_MASK) >> PAGE_SHIFT;
ulOffset = uaddr & ~PAGE_MASK;
NoPages = last-first+1;
/* NOTE(review): magic sentinel — a user read of exactly 2096 bytes is
 * silently reinterpreted as a DMA_LIST_SIZE descriptor-buffer request. */
if(count == 2096)
{
count = DMA_LIST_SIZE;
}
#ifdef DEBUG
printk("init user [0x%llx+0x%llx => %lld pages]\n",addr,(dma_addr_t)count,NoPages);
#endif
if(uchChannel == SGLIST_CHA)
{
result = 1 ;
/* Coherent buffer to hold one descriptor table (list slot iListCntA). */
vPtr = dma_alloc_coherent(gDev[minor], count, &dma_handle, GFP_KERNEL);
if (!vPtr) {
printk("dma_alloc_coherent failed for data buffer\n");
return -ENOMEM;
}
dma_coherent_virtA[iListCntA] = vPtr;
dma_coherent_handleA[iListCntA] = dma_handle;
pchVirtAddr[minor] = vPtr;
ulStartPtrA[iListCntA] = dma_handle ;
#ifdef DEBUG
printk("SGLIST_CHA : %lu pchVirtAddr %X \n",iListCntA,pchVirtAddr[minor] );
printk("Allocated: virt=%p (aligned=%d), phys=0x%llx (aligned=%d)\n",vPtr, IS_ALIGNED((unsigned long)vPtr, 16),dma_handle, !(dma_handle & 0xF));
#endif
if(dma_list_sgA[minor][iListCntA] != NULL)
{
kfree(dma_list_sgA[minor][iListCntA]);
dma_list_sgA[minor][iListCntA] = NULL;
}
dma_list_sgA[minor][iListCntA] = kmalloc_array(result,sizeof(struct scatterlist),GFP_KERNEL);
sg_init_table(dma_list_sgA[minor][iListCntA],result) ;
printk("SGLIST_CHA .. in for Length of cnt:%d %d %d \n",iListCntA,ulLength,uloffset1) ;
sg_set_buf(&dma_list_sgA[minor][iListCntA][0], vPtr, count);
dma_list_sgcntA[minor][iListCntA] = result;
printk(" dma_list_sgcntA %d ",dma_list_sgcntA[minor][iListCntA]) ;
/* NOTE(review): streaming-mapping the coherent buffer — see the header
 * comment; this is a DMA-API violation. */
result = dma_map_sg(gDev[minor],dma_list_sgA[minor][iListCntA],dma_list_sgcntA[minor][iListCntA],DMA_TO_DEVICE);
dma_list_sgcntA[minor][iListCntA] = result;
#ifdef DEBUG
printk("SGLIST_CHA : No of Dma Mapped List %d\n",dma_list_sgcntA[minor][iListCntA]);
#endif
printk("SGLIST_CHA Length of cnt:%lu %ld \n",iListCntA,count) ;
ITPDmaExecutionRoutine(dma_list_sgA[minor][iListCntA],minor,dma_list_sgcntA[minor][iListCntA]);
}
if(uchChannel == CHA)
{
pages_mem_sgA[iListCntA]= kmalloc_array(NoPages, sizeof(struct page*),GFP_KERNEL);
//down_read(&current->mm->mmap_sem);  (legacy API, kept for reference)
mmap_read_lock(current->mm);
result= pin_user_pages(uaddr & PAGE_MASK, // start virtual address
NoPages, // number of pages
FOLL_WRITE | FOLL_FORCE, // gup_flags
pages_mem_sgA[iListCntA],// output pages
NULL); // no VMA info needed
//up_read(&current->mm->mmap_sem);  (legacy API, kept for reference)
mmap_read_unlock(current->mm);
/* NOTE(review): pin_user_pages() returns a NEGATIVE errno on failure
 * and may return fewer pages than requested; only result == 0 is
 * checked here, so errors and partial pins slip through.  The pinned
 * pages are also never released with unpin_user_pages(). */
if (result == 0)
{
NoPages = (err >= 0) ? err : 0;
#ifdef DEBUG
printk("get_user_pages: err=%d [%lld]\n",result,NoPages);
#endif
return err < 0 ? err : -EINVAL;
}
ulPages_memA[iListCntA] = result;
/* NOTE(review): kmap() of the first page has no matching kunmap(). */
vPtr = kmap(pages_mem_sgA[iListCntA][0]);
pchVirtAddr[minor] = (char*)vPtr;
pchVirtAddr[minor] = pchVirtAddr[minor] + ulOffset;
#ifdef DEBUG
printk("CHA : No of Pages Mapped %d\n",result);
#endif
/* Build the SG table: first page is shortened by the intra-page offset,
 * middle pages are full pages, last page takes the remainder. */
ulTotalLen = 0;
for(i = 0;i< result;i++)
{
if(i == 0)
{
/* NOTE(review): if result == 1 this length (PAGE_SIZE - ulOffset)
 * may exceed 'count' — confirm the single-page case is intended. */
ulLength = PAGE_SIZE - ulOffset;
ulTotalLen = ulLength;
uloffset1 = ulOffset;
}
else if(i == (result - 1))
{
ulLength = count - ulTotalLen;
uloffset1 = 0;
}
else
{
ulLength = PAGE_SIZE;
ulTotalLen = ulTotalLen + ulLength;
uloffset1 = 0;
}
if(i == 0)
{
if(dma_mem_sgA[minor][iListCntA] != NULL)
{
kfree(dma_mem_sgA[minor][iListCntA]) ;
dma_mem_sgA[minor][iListCntA] = NULL ;
}
dma_mem_sgA[minor][iListCntA] = kmalloc_array(DMA_MAX_LIST_COUNT,sizeof(struct scatterlist),GFP_KERNEL);
sg_init_table(dma_mem_sgA[minor][iListCntA],result) ;
}
//printk("CHA .. in for Length of cnt:%d %d %d \n",iListCntA,ulLength,uloffset1) ;
sg_set_page(&dma_mem_sgA[minor][iListCntA][i],pages_mem_sgA[iListCntA][i],ulLength,uloffset1) ;
dma_mem_sgcntA[minor][iListCntA] = result;
}
/* Map the pinned user pages for device->memory DMA; the mapped count may
 * be smaller than 'result' if entries were coalesced. */
result = dma_map_sg(gDev[minor],dma_mem_sgA[minor][iListCntA], dma_mem_sgcntA[minor][iListCntA],DMA_FROM_DEVICE);
dma_mem_sgcntA[minor][iListCntA] = result;
#ifdef DEBUG
printk("CHA :No of Dma Mapped List %d\n",dma_mem_sgcntA[minor][iListCntA]);
#endif
printk(" CHA Length of cnt:%lu %ld \n",iListCntA,count) ;
ITPDmaExecutionRoutine(dma_mem_sgA[minor][iListCntA],minor,dma_mem_sgcntA[minor][iListCntA]);
dma_sync_sg_for_device(gDev[minor], dma_mem_sgA[minor][iListCntA],dma_mem_sgcntA[minor][iListCntA],DMA_FROM_DEVICE );
ulDmaLength[minor][0] = count ;
lDmaRemndr[minor][0] = 0 ;
bDevCloseStatusA[minor] = 0 ;
}
return 0;
}
/*
 * ITPDmaExecutionRoutine — two-phase descriptor-chain builder, selected
 * by the global uchChannel:
 *   SGLIST_CHA: record the bus address of a freshly mapped descriptor
 *               buffer; once MAX_LIST_COUNT buffers are registered, write
 *               the first buffer's address into the device's DMADPR0.
 *   CHA:        fill the current descriptor table with one entry per SG
 *               segment of the pinned user buffer; each DPR carries
 *               PCI_DESC | DMA_READ | ENABLE_TERM_INT and links to the
 *               next 32-byte descriptor slot; the last list wraps back to
 *               list 0 to form a ring.
 *
 * NOTE(review): ENABLE_TERM_INT in the descriptor DPR only arms the
 * per-descriptor "interrupt on terminal count" bit — the bridge's global
 * interrupt enables (e.g. PLX INTCSR DMA channel / PCI interrupt enable
 * bits) must ALSO be set somewhere, and no such write is visible in this
 * file.  Missing INTCSR setup would match the observed "data lands, no
 * interrupt" symptom exactly — verify.
 */
void ITPDmaExecutionRoutine(struct scatterlist *sglist,int minor,int NumberOfElements)
{
uint32_t *desc ;
UCHAR ucQwrdAlign;
UCHAR uchFlag = 0;
UINT i;
int index = minor;
dma_addr_t ulNextDmaList = 0,alligned_addr ;
struct scatterlist *sgEntry ;
if(uchChannel == SGLIST_CHA)
{
uchFlag = PCI_DESC | DMA_READ | ENABLE_TERM_INT;
/* NOTE(review): this OVERWRITES the dma_alloc_coherent handle stored by
 * ITP_read with the dma_map_sg (streaming) address — see the API-misuse
 * note in ITP_read. */
ulStartPtrA[iListCntA] = sg_dma_address(&sglist[0]);
#ifdef DEBUG
if (ulStartPtrA[iListCntA] >> 32)
{
printk("ERROR: DMA address has upper 32 bits set, but hardware supports only 32-bit!\n");
}
else
{
printk("ulStartPtrA %llx",ulStartPtrA[iListCntA] ) ;
}
#endif
/* NOTE(review): the alignment adjustment is computed, then immediately
 * forced to 0 — dead code; if the buffer can ever be non-16-byte
 * aligned this needs a real fix, not zeroing. */
ucQwrdAlign = (UCHAR)(0x10 - (ulStartPtrA[iListCntA] & 0x0F));
ucQwrdAlign = 0 ;
alligned_addr = ulStartPtrA[iListCntA] + ucQwrdAlign;
ulStartPtrA[iListCntA] = ulStartPtrA[iListCntA] + ucQwrdAlign;
/* NOTE(review): the flag bits are OR-ed INTO the stored bus address.
 * From here on ulStartPtrA[n] is "address | flags", which is what the
 * PLX DPR register format expects — but the same value is also reused
 * below as a dma_addr_t for dma_sync_single_for_device(), where the
 * flag bits corrupt the handle. */
ulStartPtrA[iListCntA] = ulStartPtrA[iListCntA] | uchFlag;
pDmaListA[iListCntA] = (DMA_LIST*)(pchVirtAddr[index] + ucQwrdAlign);
#ifdef DEBUG
printk( "ITPDmaExecutionRoutine : Device %d : sglistA %d iListCntA %ld ucQwrdAlign %d pDmaListA %llx \n",index,NumberOfElements,iListCntA,ucQwrdAlign,pDmaListA[iListCntA] );
#endif
iListCntA++;
if(iListCntA == MAX_LIST_COUNT)
{
/* NOTE(review): two problems — (1) ulStartPtrA[0] carries the OR-ed
 * flag bits, so this syncs a bogus handle; (2) only sizeof(DMA_LIST)
 * (32 bytes) is synced although the device will walk whole descriptor
 * tables.  Coherent memory would not need this call at all. */
dma_sync_single_for_device(gDev[minor],ulStartPtrA[0],sizeof(DMA_LIST),DMA_TO_DEVICE);
/* Kick the engine: first descriptor address + flags into DMADPR0.
 * writel() truncates to 32 bits, acceptable under the 32-bit mask. */
writel( ulStartPtrA[0] ,(UINT *)EEPROM_Mapped_Addr[minor] + DMADPR0/4);
iListCntA = 0;
#ifdef DEBUG
printk("To UINT (*)EEPROM_Mapped_Addr[minor] + DMADPR0/4) %llx ",ulStartPtrA[0] ) ;
alligned_addr = readl((u32 *)EEPROM_Mapped_Addr[minor] + DMADPR0/4);
printk("From UINT (*)EEPROM_Mapped_Addr[minor] + DMADPR0/4) %llx ",alligned_addr) ;
#endif
}
}
if(uchChannel == CHA)
{
uchFlag = PCI_DESC | DMA_READ | ENABLE_TERM_INT;
for_each_sg(sglist,sgEntry,NumberOfElements,i)
{
/* Next-descriptor link: base of this list (flags masked off) plus the
 * 32-byte stride to slot i+1. */
ulNextDmaList = (ulStartPtrA[iListCntA] & 0xFFFFFFF0) + (sizeof(DMA_LIST)*(i + 1));
alligned_addr = sg_dma_address(sgEntry);
pDmaListA[iListCntA][i].u32PADR = lower_32_bits(alligned_addr) ;
pDmaListA[iListCntA][i].u32LADR = LOCAL_DEVICE_ADDRESS_A;
pDmaListA[iListCntA][i].u32SIZ = sg_dma_len(sgEntry);
pDmaListA[iListCntA][i].u32DPR = ulNextDmaList | uchFlag ;
/* NOTE(review): upper 32 bits stored but the device runs with a 32-bit
 * mask, so this should always be 0 — assert rather than trust. */
pDmaListA[iListCntA][i].u32HPADR = upper_32_bits(alligned_addr) ;
#ifdef DEBUG
printk("sg_dma_address %08llX u32PADR %x:%x u32LADR %x u32SIZ %x u32DPR %x ",sg_dma_address(sgEntry),pDmaListA[iListCntA][i].u32HPADR,pDmaListA[iListCntA][i].u32PADR ,pDmaListA[iListCntA][i].u32LADR ,pDmaListA[iListCntA][i].u32SIZ ,pDmaListA[iListCntA][i].u32DPR ) ;
#endif
}
/* Chain the last entry of this list to the start of the NEXT list.
 * ulStartPtrA[iListCntA + 1] already has the flag bits OR-ed in. */
pDmaListA[iListCntA][i - 1].u32DPR = ulStartPtrA[iListCntA + 1];
ulTotalDmaListA[index][iListCntA]= NumberOfElements;
printk("pDmaListA[iListCntA][i - 1].u32DPR [%lu][%d]ulStartPtrA[iListCntA + 1] %lu = %x",iListCntA,i - 1,iListCntA + 1,pDmaListA[iListCntA][i - 1].u32DPR ) ;
iListCntA++;
if(iListCntA == MAX_LIST_COUNT)
{
//pDmaListA[iListCntA - 1][i - 1].u32DPR = ulNextDmaList | END_OF_CHAIN;
/* Close the ring: last descriptor of the last list points back at
 * list 0.  No END_OF_CHAIN is set, so the engine loops forever —
 * confirm that is intended. */
pDmaListA[iListCntA - 1][i - 1].u32DPR = (ulStartPtrA[0] & 0xFFFFFFF0) | uchFlag ;
#ifdef DEBUG
printk("iListCntA == MAX_LIST_COUNT [%lu][%d] = %x",iListCntA - 1,i - 1,pDmaListA[iListCntA - 1][i - 1].u32DPR ) ;
#endif
iListCntA = 0;
desc = (uint32_t*)(dma_coherent_virtA[0] ) ;
printk("PHY to VIRT :: %X = PADR : %08X LADR %08X SIZ %08X, DPR %08X",(uint32_t*)(dma_coherent_virtA[0]) ,desc[0],desc[1],desc[2],desc[3]);
}
}
}
/*
 * ITP_write — write() entry point: copies a small command buffer from
 * user space.  Always reports 0 bytes written (the original behavior;
 * callers relying on the return value should be audited).
 *
 * FIXES: 'Uchar' typo (did not compile) corrected to UCHAR; the
 * copy_from_user length is now clamped to sizeof(bufVal) — previously a
 * user-controlled 'count' could overflow the 10-byte stack buffer — and
 * a faulting user pointer now returns -EFAULT instead of being ignored.
 */
ssize_t ITP_write(struct file *filp, const char *buf, size_t count, loff_t *offset)
{
	int minor;
	UCHAR bufVal[10];

	minor = MINOR(filp->f_path.dentry->d_inode->i_rdev);
	if (minor == 0)
		return 0;
	minor = minor - 1;
#ifdef DEBUG
	printk("Device Writing Operation Minor: %d\n",minor);
#endif
	/* Clamp to the destination buffer — 'count' is untrusted. */
	if (count > sizeof(bufVal))
		count = sizeof(bufVal);
	if (copy_from_user(bufVal, (UCHAR*)buf, count))
		return -EFAULT;
	return 0;
}
/*
 * ITP_isr — shared interrupt handler.  Counts channel-A completions per
 * device; when a full list has completed, publishes the list id through
 * chBuffId[0] and signals the registered user process via kill_pid().
 * NOTE(review): IRQ_HANDLED is returned unconditionally — on a shared
 * line the handler must read the device's interrupt-status register and
 * return IRQ_NONE when the interrupt is not ours, otherwise other
 * devices on the line are starved.  Also printk() on every interrupt is
 * very expensive in an ISR.
 */
irqreturn_t ITP_isr(int i, void *dev_id)
{
int j,minor = 0;
//struct pid *pPidVal = NULL ;
ULONG ulChanReg,ulLocalInt=0,ulLocalInt2 = 0 ;
UCHAR uchClrIntReg;
printk(" In ISR") ;
/* NOTE(review): this line is garbled by HTML extraction (a '<' and the
 * code between the loop header and the comparison were stripped).  It
 * presumably iterated over probed devices, identified/acknowledged the
 * interrupt source, incremented ulIntCntA[minor], and compared it
 * '>=' against ulTotalDmaListA[minor][ulListCntA[minor]] — recover the
 * original source before building. */
for(j = 0;j= ulTotalDmaListA[minor][ulListCntA[minor]])
{
/* Read (and presumably acknowledge) the local interrupt status. */
ulLocalInt = readl((UINT *)PCM_Mapped_Addr[minor]+ 0x20);
ulIntCntA[minor] = 0;
/* NOTE(review): chBuffId is a global pointer set elsewhere — confirm it
 * is non-NULL before the first interrupt can fire. */
chBuffId[0] = (UCHAR)ulListCntA[minor];
if(pPidVal[minor][0])
{
kill_pid(pPidVal[minor][0],SignalNo[minor][0], 1);
}
else
printk("ERROR: CHA Sending signal Failed\n");
/* Advance to the next list slot in the ring. */
ulListCntA[minor]++;
if(ulListCntA[minor] == MAX_LIST_COUNT)
ulListCntA[minor] = 0;
}
}
return IRQ_HANDLED;
}
/*
 * ITP_do_tasklet — deferred-work body.  Currently only logs which device
 * (taken from the global 'h') scheduled the tasklet.
 */
void ITP_do_tasklet(struct tasklet_struct *unused)
{
	int minor = h;

#ifdef DEBUG
	printk("DRIVER:TASKLET routine %d\n",minor);
#endif
}
/*
 * NOTE(review): this whole region is garbled by HTML extraction — it is
 * the remains of TWO functions fused together: simple_vma_open() (a VMA
 * open callback that logs the mapping) and the tail of ITP_mmap() (the
 * .mmap fop referenced from ITP_fops).  Evidence of the damage:
 *   - "vma->vm_pgoff vm_pgoff" is not valid C (text duplicated);
 *   - the '#endif' below has no visible matching '#ifdef';
 *   - 'result' is used without a declaration;
 *   - a void function cannot 'return -EAGAIN' / 'return 0'.
 * The lost ITP_mmap() header presumably read something like
 * "static int ITP_mmap(struct file *filp, struct vm_area_struct *vma)"
 * with an "int result;" declaration — recover the original source before
 * building.
 */
void simple_vma_open(struct vm_area_struct *vma)
{
printk(KERN_NOTICE "Simple VMA open, virt %lx, phys %lx\n",
vma->vm_start, vma->vm_pgoff vm_pgoff);
printk("DRIVER:MMAP routine size %lX\n",vma->vm_end - vma->vm_start);
#endif
/* Map the physical pages (vm_pgoff) straight into the caller's VMA. */
result = remap_pfn_range( vma,
vma->vm_start,
vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot
);
if(result)
return -EAGAIN;
vma->vm_ops = &simple_remap_vm_ops;
simple_vma_open(vma);
return 0;
}
/*
 * ITP_unallocate — intended teardown hook for the per-channel DMA
 * resources of device 'minor', channel 'ch'.
 *
 * FIX: dropped the unused locals i and j.
 * NOTE(review): nothing is actually released here yet — the body only
 * logs.  The coherent descriptor buffers, pinned user pages and SG
 * tables created in ITP_read() currently leak; the unmap/unpin/free
 * calls belong in this function.
 */
void ITP_unallocate(struct device *dev,int minor,int ch)
{
	printk("Deallocating Lists\n") ;
}
//int ITP_control(struct inode *inode, struct file *filp, unsigned int command, unsigned long argument)
long ITP_control( struct file *filp, unsigned int command, unsigned long argument)
{
int g_iMinor_No,TotalCard,minor,iData = 0;
ITP_VERSION stVersion;
UCHAR uchData = 0;
ULONG ulData = 0,ulAddr = 0,ulDataArr[10];
USHORT usData = 0;
char retVal;
g_iMinor_No = MINOR ( filp->f_path.dentry->d_inode->i_rdev );
minor = g_iMinor_No;
switch(command)
{
case ITP_TOTAL_DEVICES:
TotalCard = count;
retVal = copy_to_user((int*)argument,&TotalCard ,sizeof(int));
break;
case ITP_SET_SIGNAL_NO :
retVal = copy_from_user(ulDataArr,(PULONG)argument,sizeof(ULONG) * 10);
ulData = ulDataArr[0];
SignalNo[minor][ulData] = ulDataArr[1];
Pid[minor][ulData] = ulDataArr[2];
pPidVal[minor][ulData] = find_vpid(Pid[minor][ulData]) ;
//printk("%ld %ld %ld\n",ulData,SignalNo[minor][ulData],Pid[minor][ulData]);
break;
case ITP_GET_PHYSICAL_ADDR :
retVal = copy_from_user(&ulData,(PULONG)argument,sizeof(ULONG));
iData = (int)ulData;
ulData = (ULONG)BaseAddress[iData];
retVal = copy_to_user((PULONG)argument,&ulData ,sizeof(ULONG));
break;
case ITP_STOP_DEV_A:
bDevCloseStatusA[minor] = 1 ;
printk("Stop Device invoked in CHA \n") ;
break ;
case ITP_STOP_DEV_B:
bDevCloseStatusB[minor] = 1 ;
printk("Stop Device invoked in CHB \n") ;
break ;
default :
break;
}
return 0;
}
/* Character-device entry points (C99 designated initializers). */
struct file_operations ITP_fops = {
	.read           = ITP_read,
	.write          = ITP_write,
	.open           = ITP_open,
	.release        = ITP_close,
	.unlocked_ioctl = ITP_control,
	.mmap           = ITP_mmap,
	.owner          = THIS_MODULE,
};
static struct pci_driver ITP_driver = {
.name = "PCIITP",
.id_table = ITP,
.probe = probe,
//.remove = __devexit_p(remove),
};
int __init ITP_Init(void)
{
int status,i;
printk("/********** PciITP Module Init********************************/\n");
status = pci_register_driver(&ITP_driver);
if(status >= 0)
{
printk("Pci registeraion succeeded\n");
}
else
{
printk("Pci registeraion Failed\n");
}
cdev_init(&Struct_ITP,&ITP_fops);
Struct_ITP.owner = THIS_MODULE;
Struct_ITP.ops = &ITP_fops;
i = 0;
for(i = 0;i
Подробнее здесь: [url]https://stackoverflow.com/questions/79710844/linux-pci-dma-with-scatter-gather-not-triggering-interrupt[/url]
1753246971
Anonymous
Я разрабатываю драйвер PCI Linux для пользовательского устройства PCI, которое включает в себя FPGA и мост PLX. Устройство выполняет передачу DMA в системную память, и мой драйвер отвечает за передачу этих данных в пространство пользователя. Из пользовательского пространства я инициирую команду (SGLIST_CHA), которая запускает драйвер, чтобы выделить 10 буферов по 4096 байт, каждый из которых использует DMA_ALLOC_COHERENT (). Эти буферы являются типичными для пользовательской структуры DMA_LIST (с полями для PADR, LADR, SIZ и DPR), которая содержит информацию DMA SG (Scatter-Gater). Список дескрипторов построен, и физический адрес первого дескриптора записывается в реестр DMADPR0 устройства. Затем, из пространства пользователя, я отправляю буферы ~ 16 КБ (10 раз), которые драйвера прикрепляет с использованием pin_user_pages () и карт с использованием dma_map_sg (). Эти нанесенные на карту записи SG записаны в таблицу дескрипторов, и каждая 4K -передача помечена DMA_READ | Enable_term_int, ожидая прерывания после завершения. Однако, хотя память, по -видимому, правильно написана устройством, я не получаю никаких прерываний. Я использую DMA_SET_MASK_AND_COHERENT (& PDEV-> DEV, DMA_BIT_MASK (32)) из-за аппаратных ограничений с 64-битной адресацией. Я также называю dma_sync_single_for_device () в буфере дескриптора, прежде чем инициировать DMA. Несмотря на это, в RHEL 9 64 бит PCI не генерирует прерывания или DMA не заполняется из -за проблемы доступа к адресу HW. Я пытаюсь понять, существует ли злоупотребление API Linux DMA, неправомерная синхронизация памяти, проблема выравнивания дескриптора или что -то еще, что приводит к этому. 
Любая помощь или руководство по решению этой проблемы прерывания были бы очень оценены.[code]typedef struct { uint32_t u32PADR; // PCI Low address uint32_t u32LADR; // Local Address - Card side uint32_t u32SIZ; // DMA Transfer size uint32_t u32DPR; // Descriptor pointer uint32_t u32HPADR ; // PCI High address uint32_t dummy2 ; uint32_t dummy3; uint32_t dummy4 ; //uint32_t u32DPR_high; // PCI High address } DMA_LIST; typedef struct{ dma_addr_t addr; u32 size; u32 flags; }dma_desc_t; DMA_LIST *pDmaListA[MAX_LIST_COUNT]; DMA_LIST *pDmaListB[MAX_LIST_COUNT]; dma_addr_t ulStartPtrA[MAX_LIST_COUNT + 1]; dma_addr_t ulStartPtrB[MAX_LIST_COUNT + 1]; dma_addr_t ulHwStartPtrA[MAX_LIST_COUNT + 1]; dma_addr_t ulHwStartPtrB[MAX_LIST_COUNT + 1]; ULONG iListCntA,iListCntB; UCHAR* pchVirtAddr[MAX_DEVICES] = {NULL} ; ULONG ulTotalDmaListA[MAX_DEVICES][MAX_LIST_COUNT]; ULONG ulTotalDmaListB[MAX_DEVICES][MAX_LIST_COUNT]; UCHAR uchIntChannelA[MAX_DEVICES]; UCHAR uchIntChannelB[MAX_DEVICES]; UCHAR uchIntChannelAB[MAX_DEVICES]; ULONG ulOpen[MAX_DEVICES]; ULONG iListCntA,iListCntB; ULONG ulListCntA[MAX_DEVICES],ulListCntB[MAX_DEVICES]; ULONG ulIntCntA[MAX_DEVICES],ulIntCntB[MAX_DEVICES]; ULONG ulPages_listA[MAX_LIST_COUNT],ulPages_listB[MAX_LIST_COUNT],ulPages_memA[MAX_LIST_COUNT],ulPages_memB[MAX_LIST_COUNT]; void* dma_coherent_virtA[MAX_LIST_COUNT]; dma_addr_t dma_coherent_handleA[MAX_LIST_COUNT]; void *Base_Add; void* ITP_Mapped_Addr[3] = {0}; unsigned long long mem_size[3]; //64 Bit void *BaseAddress[3]; CONFIG_STRUCT stConfig; struct cdev Struct_ITP; struct page **pBuffId = NULL ; struct page **pEventA; struct page **pEventB; struct page **pages_list_sgA[MAX_LIST_COUNT]; struct page **pages_list_sgB[MAX_LIST_COUNT]; struct page **pages_mem_sgA[MAX_LIST_COUNT]; struct page **pages_mem_sgB[MAX_LIST_COUNT]; struct scatterlist *dma_list_sgA[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}}; struct scatterlist *dma_list_sgB[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}}; struct scatterlist 
*dma_mem_sgA[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}}; struct scatterlist *dma_mem_sgB[MAX_DEVICES][MAX_LIST_COUNT] = {{NULL}}; int dma_list_sgcntA[MAX_DEVICES][MAX_LIST_COUNT],dma_list_sgcntB[MAX_DEVICES][MAX_LIST_COUNT]; int dma_mem_sgcntA[MAX_DEVICES][MAX_LIST_COUNT],dma_mem_sgcntB[MAX_DEVICES][MAX_LIST_COUNT]; ULONG ulDmaLength[MAX_DEVICES][2] ; long lDmaRemndr[MAX_DEVICES][2] ; int bDevCloseStatusA[MAX_DEVICES] ; int bDevCloseStatusB[MAX_DEVICES] ; int iCount; UCHAR irq[MAX_DEVICES]; UCHAR uchChannel; PUCHAR chBuffId; PUCHAR eventA; PUCHAR eventB; dev_t devnum; #define DEBUG 1 #define __DEBUG__ 1 void* PCM_Mapped_Addr[MAX_DEVICES]; void* EEPROM_Mapped_Addr[MAX_DEVICES]; void *Base_Addr[MAX_DEVICES]; pid_t processno; int ITPflag[MAX_DEVICES]; ULONG SignalNo[MAX_DEVICES][2]; ULONG Pid[MAX_DEVICES][2],gMinor; struct pid *pPidVal[MAX_DEVICES][2] = {{NULL}}; int cnt=0; struct pci_device_id devid[3]; struct pci_device_id *gDevid; struct device* gDev[MAX_DEVICES]; int count=0,h; dma_addr_t simple_region_start,simple_region_size; void ITP_do_tasklet(struct tasklet_struct *unused ); //void ITP_do_tasklet(unsigned long ); DECLARE_TASKLET(ITP_tasklet, ITP_do_tasklet); //DECLARE_TASKLET(ITP_tasklet, ITP_do_tasklet,1); irqreturn_t ITP_isr(int ,void *); void ITP_unallocate(struct device*,int,int); void ITPDmaExecutionRoutine(struct scatterlist*,int,int); static struct pci_device_id ITP[] = { { PCI_DEVICE(VENDOR,DEVICE)}, {0, } }; MODULE_DEVICE_TABLE(pci,ITP); static int probe(struct pci_dev *dev, const struct pci_device_id *id) { int j,ret; unsigned long max,min,ulAddr; int iBaseCount = 0; ret = pci_enable_device(dev); if(ret) { printk(" Error in enabling PCI device") ; } for(j=0;jdev, DMA_BIT_MASK(32))) { if (dma_set_mask (&dev->dev, 0x07FFFFF)) printk("DRIVER:Device No : %d DMA Operation is Allowed\n",count); else printk("DRIVER:Device No : %d DMA Operation is Denied\n",count); } else { printk("Using fallback 32-bit DMA mask\n"); } irq[count] = (UCHAR)dev->irq; 
devid[count] = *id; gDev[count] = &dev->dev; count++; return 0; } void remove(struct pci_dev *dev) { int iBaseCnt = 0; #ifdef DEBUG printk("Device Remove\n"); #endif for(iBaseCnt = 0;iBaseCnt < TOTAL_DEVICE_RESOURCE;iBaseCnt++) { iounmap(ITP_Mapped_Addr[iBaseCnt]); } } static int ITP_open(struct inode *inode, struct file *filp) { int minor,status; minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev); if(minor == 0) return 0; else minor = minor - 1; #ifdef DEBUG printk("Device Opened Minor: %d\n",minor); printk("Device Opened IRQ: %d\n",irq[minor]); #endif if(ulOpen[minor] == 0) { status = request_irq(irq[minor],&ITP_isr,IRQF_SHARED,"PCIITP",&devid[minor]);// in RHEL 8 onwards IRQF_DISABLED become obsolete in RHEL 6 //SA_INTERRUPT changes to IRQF_DISABLED if(status) printk("Error:IRQ Request Failed %d\n",status); else { printk("IRQ Request Succ+eded %d %d with SZ of DMA_LIST = %lu \n",status,irq[minor],sizeof(DMA_LIST)); } ulDmaLength[minor][0] = 0 ; ulDmaLength[minor][1] = 0 ; lDmaRemndr[minor][0] = 0 ; lDmaRemndr[minor][1] = 0 ; bDevCloseStatusA[minor] = 0 ; //false bDevCloseStatusB[minor] = 0 ; } ulOpen[minor]++; cnt=0; return 0; } static int ITP_close(struct inode *inode, struct file *filp) { int minor,i; minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_path.dentry->d_inode->i_rdev); if(minor == 0) return 0; else minor = minor -1; #ifdef DEBUG printk("Device Closed Minor: %d\n",minor); #endif ulOpen[minor]--; if(ulOpen[minor] == 0) { bDevCloseStatusA[minor] = 1 ; //true bDevCloseStatusB[minor] = 1 ; #ifdef DEBUG printk("Stoppting Acquisition: %d\n",minor); #endif if(EEPROM_Mapped_Addr[minor]) { writel( 0x1414 ,(UINT *)EEPROM_Mapped_Addr[minor] + DMACSR0/4); } if(PCM_Mapped_Addr[minor]) { writel( 0 ,(UINT *)PCM_Mapped_Addr[minor] + DMA_TRANS_ENABLE0); writel( 0 ,(UINT *)PCM_Mapped_Addr[minor] + DMA_TRANS_ENABLE1); } //Free everything } return 0; } ssize_t ITP_read(struct file *filp, char *buff, size_t 
count, loff_t *offset) { int minor,result = 0,i = 0; //force = 1 int err = 0; dma_addr_t NoPages = 0,addr = 0; dma_addr_t ulLength = 0,uloffset1 = 0,ulTotalLen = 0; void* vPtr = NULL; unsigned long first,last,ulOffset,uaddr; dma_addr_t dma_handle; minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev); if(minor == 0) return 0; else minor = minor -1; #ifdef DEBUG printk("Device Reading Operation Minor: %d\n",minor); printk("Memory size to be mapped for id %d %ld\n",uchChannel,(ULONG)count); #endif addr = (uintptr_t)buff; uaddr = (unsigned long)buff; first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; last = ((uaddr+count-1) & PAGE_MASK) >> PAGE_SHIFT; ulOffset = uaddr & ~PAGE_MASK; NoPages = last-first+1; if(count == 2096) { count = DMA_LIST_SIZE; } #ifdef DEBUG printk("init user [0x%llx+0x%llx => %lld pages]\n",addr,(dma_addr_t)count,NoPages); #endif if(uchChannel == SGLIST_CHA) { result = 1 ; vPtr = dma_alloc_coherent(gDev[minor], count, &dma_handle, GFP_KERNEL); if (!vPtr) { printk("dma_alloc_coherent failed for data buffer\n"); return -ENOMEM; } dma_coherent_virtA[iListCntA] = vPtr; dma_coherent_handleA[iListCntA] = dma_handle; pchVirtAddr[minor] = vPtr; ulStartPtrA[iListCntA] = dma_handle ; #ifdef DEBUG printk("SGLIST_CHA : %lu pchVirtAddr %X \n",iListCntA,pchVirtAddr[minor] ); printk("Allocated: virt=%p (aligned=%d), phys=0x%llx (aligned=%d)\n",vPtr, IS_ALIGNED((unsigned long)vPtr, 16),dma_handle, !(dma_handle & 0xF)); #endif if(dma_list_sgA[minor][iListCntA] != NULL) { kfree(dma_list_sgA[minor][iListCntA]); dma_list_sgA[minor][iListCntA] = NULL; } dma_list_sgA[minor][iListCntA] = kmalloc_array(result,sizeof(struct scatterlist),GFP_KERNEL); sg_init_table(dma_list_sgA[minor][iListCntA],result) ; printk("SGLIST_CHA .. 
in for Length of cnt:%d %d %d \n",iListCntA,ulLength,uloffset1) ; sg_set_buf(&dma_list_sgA[minor][iListCntA][0], vPtr, count); dma_list_sgcntA[minor][iListCntA] = result; printk(" dma_list_sgcntA %d ",dma_list_sgcntA[minor][iListCntA]) ; result = dma_map_sg(gDev[minor],dma_list_sgA[minor][iListCntA],dma_list_sgcntA[minor][iListCntA],DMA_TO_DEVICE); dma_list_sgcntA[minor][iListCntA] = result; #ifdef DEBUG printk("SGLIST_CHA : No of Dma Mapped List %d\n",dma_list_sgcntA[minor][iListCntA]); #endif printk("SGLIST_CHA Length of cnt:%lu %ld \n",iListCntA,count) ; ITPDmaExecutionRoutine(dma_list_sgA[minor][iListCntA],minor,dma_list_sgcntA[minor][iListCntA]); } if(uchChannel == CHA) { pages_mem_sgA[iListCntA]= kmalloc_array(NoPages, sizeof(struct page*),GFP_KERNEL); //down_read(¤t->mm->mmap_sem); mmap_read_lock(current->mm); result= pin_user_pages(uaddr & PAGE_MASK, // start virtual address NoPages, // number of pages FOLL_WRITE | FOLL_FORCE, // gup_flags pages_mem_sgA[iListCntA],// output pages NULL); // no VMA info needed //up_read(¤t->mm->mmap_sem); mmap_read_unlock(current->mm); if (result == 0) { NoPages = (err >= 0) ? err : 0; #ifdef DEBUG printk("get_user_pages: err=%d [%lld]\n",result,NoPages); #endif return err < 0 ? 
err : -EINVAL; } ulPages_memA[iListCntA] = result; vPtr = kmap(pages_mem_sgA[iListCntA][0]); pchVirtAddr[minor] = (char*)vPtr; pchVirtAddr[minor] = pchVirtAddr[minor] + ulOffset; #ifdef DEBUG printk("CHA : No of Pages Mapped %d\n",result); #endif ulTotalLen = 0; for(i = 0;i< result;i++) { if(i == 0) { ulLength = PAGE_SIZE - ulOffset; ulTotalLen = ulLength; uloffset1 = ulOffset; } else if(i == (result - 1)) { ulLength = count - ulTotalLen; uloffset1 = 0; } else { ulLength = PAGE_SIZE; ulTotalLen = ulTotalLen + ulLength; uloffset1 = 0; } if(i == 0) { if(dma_mem_sgA[minor][iListCntA] != NULL) { kfree(dma_mem_sgA[minor][iListCntA]) ; dma_mem_sgA[minor][iListCntA] = NULL ; } dma_mem_sgA[minor][iListCntA] = kmalloc_array(DMA_MAX_LIST_COUNT,sizeof(struct scatterlist),GFP_KERNEL); sg_init_table(dma_mem_sgA[minor][iListCntA],result) ; } //printk("CHA .. in for Length of cnt:%d %d %d \n",iListCntA,ulLength,uloffset1) ; sg_set_page(&dma_mem_sgA[minor][iListCntA][i],pages_mem_sgA[iListCntA][i],ulLength,uloffset1) ; dma_mem_sgcntA[minor][iListCntA] = result; } result = dma_map_sg(gDev[minor],dma_mem_sgA[minor][iListCntA], dma_mem_sgcntA[minor][iListCntA],DMA_FROM_DEVICE); dma_mem_sgcntA[minor][iListCntA] = result; #ifdef DEBUG printk("CHA :No of Dma Mapped List %d\n",dma_mem_sgcntA[minor][iListCntA]); #endif printk(" CHA Length of cnt:%lu %ld \n",iListCntA,count) ; ITPDmaExecutionRoutine(dma_mem_sgA[minor][iListCntA],minor,dma_mem_sgcntA[minor][iListCntA]); dma_sync_sg_for_device(gDev[minor], dma_mem_sgA[minor][iListCntA],dma_mem_sgcntA[minor][iListCntA],DMA_FROM_DEVICE ); ulDmaLength[minor][0] = count ; lDmaRemndr[minor][0] = 0 ; bDevCloseStatusA[minor] = 0 ; } return 0; } void ITPDmaExecutionRoutine(struct scatterlist *sglist,int minor,int NumberOfElements) { uint32_t *desc ; UCHAR ucQwrdAlign; UCHAR uchFlag = 0; UINT i; int index = minor; dma_addr_t ulNextDmaList = 0,alligned_addr ; struct scatterlist *sgEntry ; if(uchChannel == SGLIST_CHA) { uchFlag = PCI_DESC | DMA_READ | 
ENABLE_TERM_INT; ulStartPtrA[iListCntA] = sg_dma_address(&sglist[0]); #ifdef DEBUG if (ulStartPtrA[iListCntA] >> 32) { printk("ERROR: DMA address has upper 32 bits set, but hardware supports only 32-bit!\n"); } else { printk("ulStartPtrA %llx",ulStartPtrA[iListCntA] ) ; } #endif ucQwrdAlign = (UCHAR)(0x10 - (ulStartPtrA[iListCntA] & 0x0F)); ucQwrdAlign = 0 ; alligned_addr = ulStartPtrA[iListCntA] + ucQwrdAlign; ulStartPtrA[iListCntA] = ulStartPtrA[iListCntA] + ucQwrdAlign; ulStartPtrA[iListCntA] = ulStartPtrA[iListCntA] | uchFlag; pDmaListA[iListCntA] = (DMA_LIST*)(pchVirtAddr[index] + ucQwrdAlign); #ifdef DEBUG printk( "ITPDmaExecutionRoutine : Device %d : sglistA %d iListCntA %ld ucQwrdAlign %d pDmaListA %llx \n",index,NumberOfElements,iListCntA,ucQwrdAlign,pDmaListA[iListCntA] ); #endif iListCntA++; if(iListCntA == MAX_LIST_COUNT) { dma_sync_single_for_device(gDev[minor],ulStartPtrA[0],sizeof(DMA_LIST),DMA_TO_DEVICE); writel( ulStartPtrA[0] ,(UINT *)EEPROM_Mapped_Addr[minor] + DMADPR0/4); iListCntA = 0; #ifdef DEBUG printk("To UINT (*)EEPROM_Mapped_Addr[minor] + DMADPR0/4) %llx ",ulStartPtrA[0] ) ; alligned_addr = readl((u32 *)EEPROM_Mapped_Addr[minor] + DMADPR0/4); printk("From UINT (*)EEPROM_Mapped_Addr[minor] + DMADPR0/4) %llx ",alligned_addr) ; #endif } } if(uchChannel == CHA) { uchFlag = PCI_DESC | DMA_READ | ENABLE_TERM_INT; for_each_sg(sglist,sgEntry,NumberOfElements,i) { ulNextDmaList = (ulStartPtrA[iListCntA] & 0xFFFFFFF0) + (sizeof(DMA_LIST)*(i + 1)); alligned_addr = sg_dma_address(sgEntry); pDmaListA[iListCntA][i].u32PADR = lower_32_bits(alligned_addr) ; pDmaListA[iListCntA][i].u32LADR = LOCAL_DEVICE_ADDRESS_A; pDmaListA[iListCntA][i].u32SIZ = sg_dma_len(sgEntry); pDmaListA[iListCntA][i].u32DPR = ulNextDmaList | uchFlag ; pDmaListA[iListCntA][i].u32HPADR = upper_32_bits(alligned_addr) ; #ifdef DEBUG printk("sg_dma_address %08llX u32PADR %x:%x u32LADR %x u32SIZ %x u32DPR %x 
",sg_dma_address(sgEntry),pDmaListA[iListCntA][i].u32HPADR,pDmaListA[iListCntA][i].u32PADR ,pDmaListA[iListCntA][i].u32LADR ,pDmaListA[iListCntA][i].u32SIZ ,pDmaListA[iListCntA][i].u32DPR ) ; #endif } pDmaListA[iListCntA][i - 1].u32DPR = ulStartPtrA[iListCntA + 1]; ulTotalDmaListA[index][iListCntA]= NumberOfElements; printk("pDmaListA[iListCntA][i - 1].u32DPR [%lu][%d]ulStartPtrA[iListCntA + 1] %lu = %x",iListCntA,i - 1,iListCntA + 1,pDmaListA[iListCntA][i - 1].u32DPR ) ; iListCntA++; if(iListCntA == MAX_LIST_COUNT) { //pDmaListA[iListCntA - 1][i - 1].u32DPR = ulNextDmaList | END_OF_CHAIN; pDmaListA[iListCntA - 1][i - 1].u32DPR = (ulStartPtrA[0] & 0xFFFFFFF0) | uchFlag ; #ifdef DEBUG printk("iListCntA == MAX_LIST_COUNT [%lu][%d] = %x",iListCntA - 1,i - 1,pDmaListA[iListCntA - 1][i - 1].u32DPR ) ; #endif iListCntA = 0; desc = (uint32_t*)(dma_coherent_virtA[0] ) ; printk("PHY to VIRT :: %X = PADR : %08X LADR %08X SIZ %08X, DPR %08X",(uint32_t*)(dma_coherent_virtA[0]) ,desc[0],desc[1],desc[2],desc[3]); } } } ssize_t ITP_write(struct file *filp, const char *buf, size_t count, loff_t *offset) { int minor,i=0,ret= 0 ; Uchar bufVal[10] ; minor = MINOR(filp->f_path.dentry->d_inode->i_rdev); //MINOR(filp->f_dentry->d_inode->i_rdev); if(minor == 0) return 0; else minor = minor -1; #ifdef DEBUG printk("Device Writing Operation Minor: %d\n",minor); #endif ret = copy_from_user(bufVal,(UCHAR*)buf,sizeof(UCHAR)*count) ; return 0; } irqreturn_t ITP_isr(int i, void *dev_id) { int j,minor = 0; //struct pid *pPidVal = NULL ; ULONG ulChanReg,ulLocalInt=0,ulLocalInt2 = 0 ; UCHAR uchClrIntReg; printk(" In ISR") ; for(j = 0;j= ulTotalDmaListA[minor][ulListCntA[minor]]) { ulLocalInt = readl((UINT *)PCM_Mapped_Addr[minor]+ 0x20); ulIntCntA[minor] = 0; chBuffId[0] = (UCHAR)ulListCntA[minor]; if(pPidVal[minor][0]) { kill_pid(pPidVal[minor][0],SignalNo[minor][0], 1); } else printk("ERROR: CHA Sending signal Failed\n"); ulListCntA[minor]++; if(ulListCntA[minor] == MAX_LIST_COUNT) 
ulListCntA[minor] = 0; } } return IRQ_HANDLED; } void ITP_do_tasklet(struct tasklet_struct *unused) { int minor; minor = h; #ifdef DEBUG printk("DRIVER:TASKLET routine %d\n",minor); #endif } void simple_vma_open(struct vm_area_struct *vma) { printk(KERN_NOTICE "Simple VMA open, virt %lx, phys %lx\n", vma->vm_start, vma->vm_pgoff vm_pgoff); printk("DRIVER:MMAP routine size %lX\n",vma->vm_end - vma->vm_start); #endif result = remap_pfn_range( vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot ); if(result) return -EAGAIN; vma->vm_ops = &simple_remap_vm_ops; simple_vma_open(vma); return 0; } void ITP_unallocate(struct device *dev,int minor,int ch) { int i,j; printk("Deallocating Lists\n") ; } //int ITP_control(struct inode *inode, struct file *filp, unsigned int command, unsigned long argument) long ITP_control( struct file *filp, unsigned int command, unsigned long argument) { int g_iMinor_No,TotalCard,minor,iData = 0; ITP_VERSION stVersion; UCHAR uchData = 0; ULONG ulData = 0,ulAddr = 0,ulDataArr[10]; USHORT usData = 0; char retVal; g_iMinor_No = MINOR ( filp->f_path.dentry->d_inode->i_rdev ); minor = g_iMinor_No; switch(command) { case ITP_TOTAL_DEVICES: TotalCard = count; retVal = copy_to_user((int*)argument,&TotalCard ,sizeof(int)); break; case ITP_SET_SIGNAL_NO : retVal = copy_from_user(ulDataArr,(PULONG)argument,sizeof(ULONG) * 10); ulData = ulDataArr[0]; SignalNo[minor][ulData] = ulDataArr[1]; Pid[minor][ulData] = ulDataArr[2]; pPidVal[minor][ulData] = find_vpid(Pid[minor][ulData]) ; //printk("%ld %ld %ld\n",ulData,SignalNo[minor][ulData],Pid[minor][ulData]); break; case ITP_GET_PHYSICAL_ADDR : retVal = copy_from_user(&ulData,(PULONG)argument,sizeof(ULONG)); iData = (int)ulData; ulData = (ULONG)BaseAddress[iData]; retVal = copy_to_user((PULONG)argument,&ulData ,sizeof(ULONG)); break; case ITP_STOP_DEV_A: bDevCloseStatusA[minor] = 1 ; printk("Stop Device invoked in CHA \n") ; break ; case ITP_STOP_DEV_B: bDevCloseStatusB[minor] = 
1 ; printk("Stop Device invoked in CHB \n") ; break ; default : break; } return 0; } struct file_operations ITP_fops = { read : ITP_read, write : ITP_write, open : ITP_open, release : ITP_close, unlocked_ioctl : ITP_control, mmap : ITP_mmap, owner : THIS_MODULE, }; static struct pci_driver ITP_driver = { .name = "PCIITP", .id_table = ITP, .probe = probe, //.remove = __devexit_p(remove), }; int __init ITP_Init(void) { int status,i; printk("/********** PciITP Module Init********************************/\n"); status = pci_register_driver(&ITP_driver); if(status >= 0) { printk("Pci registeraion succeeded\n"); } else { printk("Pci registeraion Failed\n"); } cdev_init(&Struct_ITP,&ITP_fops); Struct_ITP.owner = THIS_MODULE; Struct_ITP.ops = &ITP_fops; i = 0; for(i = 0;i Подробнее здесь: [url]https://stackoverflow.com/questions/79710844/linux-pci-dma-with-scatter-gather-not-triggering-interrupt[/url]