@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -11,26 +11,19 @@
  * GNU General Public License for more details.
  *
  */
+#include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
 #include <linux/idr.h>
+#include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include "tee_private.h"
 
-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
-	struct tee_device *teedev = shm->teedev;
 	struct tee_shm_pool_mgr *poolm;
 
-	mutex_lock(&teedev->mutex);
-	idr_remove(&teedev->idr, shm->id);
-	if (shm->ctx)
-		list_del(&shm->link);
-	mutex_unlock(&teedev->mutex);
-
 	if (shm->flags & TEE_SHM_DMA_BUF)
 		poolm = &teedev->pool->dma_buf_mgr;
 	else
@@ -42,53 +35,6 @@ static void tee_shm_release(struct tee_shm *shm)
 	tee_device_put(teedev);
 }
 
-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
-			*attach, enum dma_data_direction dir)
-{
-	return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
-				     struct sg_table *table,
-				     enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
-	struct tee_shm *shm = dmabuf->priv;
-
-	tee_shm_release(shm);
-}
-
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	return NULL;
-}
-
-static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	return NULL;
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-	struct tee_shm *shm = dmabuf->priv;
-	size_t size = vma->vm_end - vma->vm_start;
-
-	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
-			       size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
-	.map_dma_buf = tee_shm_op_map_dma_buf,
-	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
-	.release = tee_shm_op_release,
-	.map_atomic = tee_shm_op_map_atomic,
-	.map = tee_shm_op_map,
-	.mmap = tee_shm_op_mmap,
-};
-
 /**
  * tee_shm_alloc() - Allocate shared memory
  * @ctx:	Context that allocates the shared memory
@@ -135,6 +81,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_dev_put;
 	}
 
+	refcount_set(&shm->refcount, 1);
 	shm->flags = flags;
 	shm->teedev = teedev;
 	shm->ctx = ctx;
@@ -157,29 +104,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_pool_free;
 	}
 
-	if (flags & TEE_SHM_DMA_BUF) {
-		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-		exp_info.ops = &tee_shm_dma_buf_ops;
-		exp_info.size = shm->size;
-		exp_info.flags = O_RDWR;
-		exp_info.priv = shm;
-
-		shm->dmabuf = dma_buf_export(&exp_info);
-		if (IS_ERR(shm->dmabuf)) {
-			ret = ERR_CAST(shm->dmabuf);
-			goto err_rem;
-		}
-	}
-
 	mutex_lock(&teedev->mutex);
 	list_add_tail(&shm->link, &ctx->list_shm);
 	mutex_unlock(&teedev->mutex);
 
 	return shm;
-err_rem:
-	mutex_lock(&teedev->mutex);
-	idr_remove(&teedev->idr, shm->id);
-	mutex_unlock(&teedev->mutex);
 err_pool_free:
 	poolm->ops->free(poolm, shm);
 err_kfree:
@@ -190,6 +119,31 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+	tee_shm_put(filp->private_data);
+	return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct tee_shm *shm = filp->private_data;
+	size_t size = vma->vm_end - vma->vm_start;
+
+	/* check for overflowing the buffer's size */
+	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+			       size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+	.owner = THIS_MODULE,
+	.release = tee_shm_fop_release,
+	.mmap = tee_shm_fop_mmap,
+};
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:	Shared memory handle
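The bounds check added in tee_shm_fop_mmap() above compares the requested window, in pages (vm_pgoff offset plus vma_pages(vma) length), against the backing buffer's size in pages. A quick standalone model of that arithmetic follows; it is a sketch only, and PAGE_SHIFT's value, mmap_fits() and the sample sizes are assumptions for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on most configurations */

/* Mirrors the check in tee_shm_fop_mmap(): offset + length must fit. */
static bool mmap_fits(unsigned long pgoff, unsigned long nr_pages,
		      unsigned long shm_size)
{
	return pgoff + nr_pages <= shm_size >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long shm_size = 4 << PAGE_SHIFT;	/* 4-page buffer */

	printf("%d\n", mmap_fits(0, 4, shm_size));	/* 1: exact fit */
	printf("%d\n", mmap_fits(1, 4, shm_size));	/* 0: one page past the end */
	printf("%d\n", mmap_fits(3, 2, shm_size));	/* 0: straddles the end */
	return 0;
}

The old dma-buf mmap op did no such check, so a caller could map past the end of the shared buffer; the anon-inode path rejects that with -EINVAL.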
@@ -203,10 +157,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
 	if ((shm->flags & req_flags) != req_flags)
 		return -EINVAL;
 
-	get_dma_buf(shm->dmabuf);
-	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+	/* matched by tee_shm_put() in tee_shm_fop_release() */
+	refcount_inc(&shm->refcount);
+	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
 	if (fd < 0)
-		dma_buf_put(shm->dmabuf);
+		tee_shm_put(shm);
 	return fd;
 }
 
@@ -216,17 +171,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
  */
 void tee_shm_free(struct tee_shm *shm)
 {
-	/*
-	 * dma_buf_put() decreases the dmabuf reference counter and will
-	 * call tee_shm_release() when the last reference is gone.
-	 *
-	 * In the case of driver private memory we call tee_shm_release
-	 * directly instead as it doesn't have a reference counter.
-	 */
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		dma_buf_put(shm->dmabuf);
-	else
-		tee_shm_release(shm);
+	tee_shm_put(shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_free);
 
@@ -327,10 +272,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
 	teedev = ctx->teedev;
 	mutex_lock(&teedev->mutex);
 	shm = idr_find(&teedev->idr, id);
+	/*
+	 * If the tee_shm was found in the IDR it must have a refcount
+	 * larger than 0 due to the guarantee in tee_shm_put() below. So
+	 * it's safe to use refcount_inc().
+	 */
 	if (!shm || shm->ctx != ctx)
 		shm = ERR_PTR(-EINVAL);
-	else if (shm->flags & TEE_SHM_DMA_BUF)
-		get_dma_buf(shm->dmabuf);
+	else
+		refcount_inc(&shm->refcount);
 	mutex_unlock(&teedev->mutex);
 	return shm;
 }
@@ -353,7 +303,25 @@ EXPORT_SYMBOL_GPL(tee_shm_get_id);
  */
 void tee_shm_put(struct tee_shm *shm)
 {
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		dma_buf_put(shm->dmabuf);
+	struct tee_device *teedev = shm->teedev;
+	bool do_release = false;
+
+	mutex_lock(&teedev->mutex);
+	if (refcount_dec_and_test(&shm->refcount)) {
+		/*
+		 * refcount has reached 0, we must now remove it from the
+		 * IDR before releasing the mutex. This will guarantee
+		 * that the refcount_inc() in tee_shm_get_from_id() never
+		 * starts from 0.
+		 */
+		idr_remove(&teedev->idr, shm->id);
+		if (shm->ctx)
+			list_del(&shm->link);
+		do_release = true;
+	}
+	mutex_unlock(&teedev->mutex);
+
+	if (do_release)
+		tee_shm_release(teedev, shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_put);
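Taken together, the patch replaces the dmabuf file's implicit reference counting with a refcount_t owned by tee_shm itself: tee_shm_alloc() starts the count at 1, tee_shm_get_fd() and tee_shm_get_from_id() take references, and tee_shm_free() and the fd's release handler drop them through tee_shm_put(), which unpublishes the object from the IDR under teedev->mutex before the final release. Below is a minimal userspace model of that lookup/teardown pattern; it is illustrative only, with a fixed-size array and a pthread mutex standing in for the IDR, teedev->mutex and refcount_t, and every identifier invented for the sketch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 16

struct obj {
	int id;
	int refcount;	/* protected by table_lock, stands in for refcount_t */
};

static struct obj *table[TABLE_SIZE];	/* stands in for the IDR */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert with an initial reference, like tee_shm_alloc() + refcount_set(.., 1). */
static struct obj *obj_alloc(int id)
{
	struct obj *o = malloc(sizeof(*o));

	o->id = id;
	o->refcount = 1;
	pthread_mutex_lock(&table_lock);
	table[id] = o;
	pthread_mutex_unlock(&table_lock);
	return o;
}

/*
 * Lookup takes a reference under the lock. Because obj_put() removes
 * the entry under the same lock before anyone can see the count at
 * zero, this increment never resurrects a dying object.
 */
static struct obj *obj_get(int id)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = table[id];
	if (o)
		o->refcount++;	/* safe: entry present implies refcount > 0 */
	pthread_mutex_unlock(&table_lock);
	return o;
}

/* Drop a reference; unpublish before the lock is released, free after. */
static void obj_put(struct obj *o)
{
	bool do_release = false;

	pthread_mutex_lock(&table_lock);
	if (--o->refcount == 0) {
		table[o->id] = NULL;	/* like idr_remove() in tee_shm_put() */
		do_release = true;
	}
	pthread_mutex_unlock(&table_lock);

	if (do_release)
		free(o);	/* like tee_shm_release() */
}

int main(void)
{
	struct obj *o = obj_alloc(3);
	struct obj *ref = obj_get(3);	/* second reference, e.g. an open fd */

	obj_put(o);	/* original reference gone, object still live */
	obj_put(ref);	/* last reference: unpublished, then freed */
	printf("lookup after free: %p\n", (void *)obj_get(3));	/* NULL */
	return 0;
}

Because the entry disappears under the same lock that guards lookups, obj_get() can never observe a refcount of zero, which is exactly the guarantee the comment in tee_shm_get_from_id() relies on.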