|
64 | 64 |
|
65 | 65 | #ifdef CONFIG_AS_VFP_VMRS_FPINST
|
66 | 66 |
|
67 |
| -#define fmrx(_vfp_) ({ \ |
68 |
| - u32 __v; \ |
69 |
| - asm(".fpu vfpv2\n" \ |
70 |
| - "vmrs %0, " #_vfp_ \ |
71 |
| - : "=r" (__v) : : "cc"); \ |
72 |
| - __v; \ |
73 |
| - }) |
74 |
| - |
75 |
| -#define fmxr(_vfp_,_var_) \ |
76 |
| - asm(".fpu vfpv2\n" \ |
77 |
| - "vmsr " #_vfp_ ", %0" \ |
78 |
| - : : "r" (_var_) : "cc") |
/*
 * fmrx(reg)       — read VFP system register `reg` into a u32.
 * fmxr(reg, val)  — write `val` into VFP system register `reg`.
 *
 * This variant uses the vmrs/vmsr mnemonics directly; it is selected
 * when the assembler understands them (CONFIG_AS_VFP_VMRS_FPINST,
 * see the enclosing #ifdef).  The ".fpu vfpv2" directive tells the
 * assembler to accept VFP instructions for these statements.
 * `asm volatile` keeps the compiler from caching, eliding, or
 * reordering the register accesses — they have hardware side effects.
 */
#define fmrx(_vfp_) ({				\
	u32 __v;				\
	asm volatile (".fpu vfpv2\n"		\
		      "vmrs %0, " #_vfp_	\
		      : "=r" (__v) : : "cc");	\
	__v;					\
})

#define fmxr(_vfp_, _var_) ({			\
	asm volatile (".fpu vfpv2\n"		\
		      "vmsr " #_vfp_ ", %0"	\
		      : : "r" (_var_) : "cc");	\
})
79 | 80 |
|
80 | 81 | #else
|
81 | 82 |
|
82 | 83 | #define vfpreg(_vfp_) #_vfp_
|
83 | 84 |
|
84 |
| -#define fmrx(_vfp_) ({ \ |
85 |
| - u32 __v; \ |
86 |
| - asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ |
87 |
| - : "=r" (__v) : : "cc"); \ |
88 |
| - __v; \ |
89 |
| - }) |
90 |
| - |
91 |
| -#define fmxr(_vfp_,_var_) \ |
92 |
| - asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ |
93 |
| - : : "r" (_var_) : "cc") |
/*
 * fmrx(reg)       — read VFP system register `reg` into a u32.
 * fmxr(reg, val)  — write `val` into VFP system register `reg`.
 *
 * Fallback variant for assemblers without VFP mnemonic support (the
 * #else branch of CONFIG_AS_VFP_VMRS_FPINST): the same operations are
 * emitted as raw coprocessor-10 accesses (mrc/mcr p10), with the
 * intended mnemonic kept in an "@" assembler comment for readability.
 * `asm volatile` keeps the compiler from caching, eliding, or
 * reordering the register accesses — they have hardware side effects.
 */
#define fmrx(_vfp_) ({						\
	u32 __v;						\
	asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) ","	\
		      "cr0, 0 @ fmrx %0, " #_vfp_		\
		      : "=r" (__v) : : "cc");			\
	__v;							\
})

#define fmxr(_vfp_, _var_) ({					\
	asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) ","	\
		      "cr0, 0 @ fmxr " #_vfp_ ", %0"		\
		      : : "r" (_var_) : "cc");			\
})
94 | 98 |
|
95 | 99 | #endif
|
96 | 100 |
|
|
0 commit comments