// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"

/* one SG table entry; the head page of a physically continuous chunk
 * carries the chunk's page count in the low (sub-PAGE_SIZE) bits of addr
 */
struct snd_sg_page {
	void *buf;		/* kernel address of the page */
	dma_addr_t addr;	/* DMA address of the page */
};

struct snd_sg_buf {
	int size;	/* allocated byte size */
	int pages;	/* allocated pages */
	int tblsize;	/* allocated table size */
	struct snd_sg_page *table;	/* address table */
	struct page **page_table;	/* page table (for vmap/vunmap) */
	struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
	tmpb.dev.dev = sgbuf->dev;
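	/* free each continuous chunk; only the head page of a chunk carries
	 * a non-zero chunk size in the low bits of its DMA address
	 */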
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuous pages */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
}

#define MAX_ALLOC_PAGES		32

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;
	void *area;

	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
		type = SNDRV_DMA_TYPE_DEV_WC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = dmab->dev.dev;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages chunk by chunk, at most MAX_ALLOC_PAGES at once */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
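	/* map all allocated pages into a single virtually continuous area */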
	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!area)
		goto _failed;
	return area;

 _failed:
	snd_dma_sg_free(dmab); /* free the table */
	return NULL;
}

static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
				      size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	dma_addr_t addr;

	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
	addr &= ~((dma_addr_t)PAGE_SIZE - 1);	/* drop the encoded chunk size */
	return addr + offset % PAGE_SIZE;
}

static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
					size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	unsigned int idx = offset >> PAGE_SHIFT;

	if (idx >= (unsigned int)sgbuf->pages)
		return NULL;
	return sgbuf->page_table[idx];
}

/* return the number of bytes from ofs that lie on physically continuous
 * pages, capped at the given size
 */
static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
					      unsigned int ofs,
					      unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_free,
	.get_addr = snd_dma_sg_get_addr,
	.get_page = snd_dma_sg_get_page,
	.get_chunk_size = snd_dma_sg_get_chunk_size,
};
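
/*
 * Usage sketch (illustrative, not part of this file): drivers normally
 * reach these ops through the generic memalloc API rather than calling
 * them directly, roughly:
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card->dev,
 *				buffer_bytes, &dmab) < 0)
 *		return -ENOMEM;
 *	// dmab.area is the vmap()ed CPU address; per-offset DMA addresses
 *	// come from snd_sgbuf_get_addr(&dmab, offset)
 *	...
 *	snd_dma_free_pages(&dmab);
 *
 * Here card->dev and buffer_bytes stand in for the caller's device and
 * requested size.
 */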