forked from ZaidQureshi/bam
-
Notifications
You must be signed in to change notification settings - Fork 0
/
nvm_dma.h
183 lines (134 loc) · 5.84 KB
/
nvm_dma.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
#ifndef __NVM_DMA_H__
#define __NVM_DMA_H__
// #ifndef __CUDACC__
// #define __device__
// #define __host__
// #endif
#include <nvm_types.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __DIS_CLUSTER__
#include <sisci_types.h>
#endif
/**
 * Create DMA mapping descriptor from physical/bus addresses.
 *
 * Create a DMA mapping descriptor, describing a region of memory that is
 * accessible for the NVM controller. The caller must supply physical/bus
 * addresses of physical memory pages, the page size and the total number of
 * pages. As the host's page size may differ from the controller's page size
 * (MPS), this function will calculate the necessary offsets into the actual
 * memory pages.
 *
 * While virtual memory is assumed to be continuous, the physical pages do not
 * need to be contiguous. Physical/bus addresses must be aligned to the
 * controller's page size.
 *
 * Note: vaddr can be NULL.
 *
 * NOTE(review): the return-code convention is not documented in this header;
 * presumably 0 on success and non-zero on failure — confirm against the
 * implementation.
 */
int nvm_dma_map(nvm_dma_t** map,              // [out] Mapping descriptor reference
                const nvm_ctrl_t* ctrl,       // NVM controller reference
                void* vaddr,                  // Pointer to userspace memory (can be NULL if not required)
                size_t page_size,             // Physical page size
                size_t n_pages,               // Number of pages to map
                const uint64_t* page_addrs);  // List of physical/bus addresses to the pages
/**
 * Create DMA mapping descriptor using offsets from a previously
 * created DMA descriptor.
 *
 * @param new_map    [out] New mapping descriptor reference
 * @param other_map  Previously created descriptor to derive the mapping from
 */
int nvm_dma_remap(nvm_dma_t** new_map, const nvm_dma_t* other_map);
/**
 * Remove DMA mapping descriptor.
 *
 * Unmap DMA mappings (if necessary) and remove the descriptor.
 * This function destroys the descriptor; the handle must not be used
 * after this call.
 *
 * @param map  Mapping descriptor to destroy
 */
void nvm_dma_unmap(nvm_dma_t* map);
/**
 * Create DMA mapping descriptor from virtual address using the kernel module.
 *
 * Similar to nvm_dma_map(), except the caller is not required to pass
 * physical/bus addresses.
 *
 * Note: vaddr can not be NULL, and must be aligned to the system page size.
 *
 * @param map    [out] Mapping descriptor reference
 * @param ctrl   NVM controller reference
 * @param vaddr  Userspace virtual address (non-NULL, system page aligned)
 * @param size   Size of the memory region to map
 */
int nvm_dma_map_host(nvm_dma_t** map, const nvm_ctrl_t* ctrl, void* vaddr, size_t size);
// NOTE(review): the CUDA guard below is intentionally commented out, so this
// declaration is visible regardless of whether CUDA headers are in use.
//#if ( defined( __CUDA__ ) || defined( __CUDACC__ ) )
/**
 * Create DMA mapping descriptor from CUDA device pointer using the kernel
 * module. Similar to nvm_dma_map_host(), except the memory pointer must be
 * a valid CUDA device pointer (see the CUDA manual for
 * cudaGetPointerAttributes).
 *
 * The controller handle must have been created using the kernel module.
 *
 * Note: devptr can not be NULL, and must be aligned to the GPU page size.
 *
 * @param map     [out] Mapping descriptor reference
 * @param ctrl    NVM controller reference (created via the kernel module)
 * @param devptr  CUDA device pointer (non-NULL, GPU page aligned)
 * @param size    Size of the memory region to map
 */
int nvm_dma_map_device(nvm_dma_t** map, const nvm_ctrl_t* ctrl, void* devptr, size_t size);
//#endif /* __CUDA__ */
#if defined( __DIS_CLUSTER__ )
/**
 * Create DMA mapping descriptor from local SISCI segment.
 *
 * Create a DMA mapping descriptor from a local segment handle, and
 * reverse-map the segment making it accessible from the controller.
 * As segment memory is always continuous and page-aligned, it is not
 * necessary to calculate physical memory addresses. However, the user
 * should ensure that the mapping size is aligned to a controller
 * page size (MPS).
 *
 * The controller handle must have been created using SmartIO, and
 * the segment must already be prepared on the local adapter.
 */
int nvm_dis_dma_map_local(nvm_dma_t** map,              // [out] Mapping descriptor reference
                          const nvm_ctrl_t* ctrl,       // NVM controller handle
                          uint32_t dis_adapter,         // Local DIS adapter the segment is prepared on
                          sci_local_segment_t segment,  // Local segment descriptor
                          bool map_vaddr);              // Also map segment into local address space?
#endif /* __DIS_CLUSTER__ */
#if defined( __DIS_CLUSTER__ )
/**
 * Create DMA mapping descriptor from remote SISCI segment.
 *
 * Create a DMA mapping descriptor from a remote segment handle, and
 * reverse-map the segment making it accessible from the controller.
 * This function is similar to nvm_dis_dma_map_local().
 *
 * The remote segment must already be connected.
 *
 * Note: You should generally prefer write combining, except
 * for mapped device registers that require fine-grained writes.
 */
int nvm_dis_dma_map_remote(nvm_dma_t** map,             // [out] Mapping descriptor reference
                           const nvm_ctrl_t* ctrl,      // NVM controller handle
                           sci_remote_segment_t segment,// Remote segment descriptor (already connected)
                           bool map_vaddr,              // Also map segment into local address space?
                           bool map_wc);                // Map with write combining?
#endif /* __DIS_CLUSTER__ */
#if ( !defined( __CUDA__ ) && !defined( __CUDACC__ ) ) && ( defined (__unix__) )
/**
 * Short-hand function for allocating a page-aligned buffer and mapping it
 * for the controller.
 *
 * Note: this function will not work if you are using the CUDA API
 * (the declaration is compiled out for CUDA builds).
 *
 * @param map   [out] Mapping descriptor reference
 * @param ctrl  NVM controller reference
 * @param size  Size of the buffer to allocate and map
 */
int nvm_dma_create(nvm_dma_t** map, const nvm_ctrl_t* ctrl, size_t size);
#endif
#if defined( __DIS_CLUSTER__ )
/**
 * Create device memory segment and map it for the controller.
 * Short-hand function for creating a device memory segment.
 * If mem_hints is 0, the API will create a local segment instead.
 *
 * @param map        [out] Mapping descriptor reference
 * @param ctrl       NVM controller reference
 * @param size       Size of the segment to create and map
 * @param mem_hints  Device memory hints; 0 requests a local segment instead
 */
int nvm_dis_dma_create(nvm_dma_t** map, const nvm_ctrl_t* ctrl, size_t size, unsigned int mem_hints);
#endif /* __DIS_CLUSTER__ */
#if defined ( __DIS_CLUSTER__ )
/**
 * Create DMA mapping descriptor from a host virtual address (DIS cluster
 * builds only). Presumably the SmartIO counterpart of nvm_dma_map_host() —
 * TODO confirm against the implementation.
 *
 * Note: This function requires the IOMMU to be enabled.
 *
 * @param map    [out] Mapping descriptor reference
 * @param ctrl   NVM controller reference
 * @param vaddr  Host virtual address to map
 * @param size   Size of the memory region to map
 */
int nvm_dis_dma_map_host(nvm_dma_t** map, const nvm_ctrl_t* ctrl, void* vaddr, size_t size);
#endif
#if ( ( defined( __CUDA__ ) || defined( __CUDACC__ ) ) && defined( __DIS_CLUSTER__ ) )
/**
 * Create DMA mapping descriptor from a CUDA device pointer (CUDA + DIS
 * cluster builds only). Presumably the SmartIO counterpart of
 * nvm_dma_map_device() — TODO confirm against the implementation.
 *
 * @param map     [out] Mapping descriptor reference
 * @param ctrl    NVM controller reference
 * @param devptr  CUDA device pointer to map
 * @param size    Size of the memory region to map
 */
int nvm_dis_dma_map_device(nvm_dma_t** map, const nvm_ctrl_t* ctrl, void* devptr, size_t size);
#endif /* __DIS_CLUSTER__ && __CUDA__ */
#endif /* __NVM_DMA_H__ */