View difference between Paste ID: V9jB1rQN and R0kSUT6i
SHOW: | | - or go back to the newest paste.
1-
d3dkmthk.h got some updates:
1+
C:\Program Files (x86)\Windows Kits\10\Include\10.0.10586.0\km\ndis.h got some updates:
2
3-
D3DKMT_MAX_MULTIPLANE_OVERLAY_ALLOCATIONS_PER_PLANE increased from 32 to 256 compared to TH1 10240:
3+
NDIS Version is now 6.51:
4
5-
#define D3DKMT_MAX_MULTIPLANE_OVERLAY_ALLOCATIONS_PER_PLANE   256
5+
Definitions with NDIS version numbers may use any of the following:
6
7-
///////
7+
        Version     First available in
8
        ------------------------------------------------------------------
9-
KMTQUERYADAPTERINFOTYPE has a new entry:
9+
        651         Windows 10
10
        650         Windows 10
11-
typedef enum _KMTQUERYADAPTERINFOTYPE
11+
12-
{
12+
13-
     KMTQAITYPE_QUERY_HW_PROTECTION_TEARDOWN_COUNT = 36,
13+
#if (NDIS_SUPPORT_NDIS651)
14-
#endif // DXGKDDI_INTERFACE_VERSION_WDDM2_0
14+
#define NDIS_STATUS_WWAN_PRESHUTDOWN_STATE                  ((NDIS_STATUS)0x40041033L)
15-
} KMTQUERYADAPTERINFOTYPE;
15+
#endif
16
17-
////////
17+
18
//
19-
D3DKMT_ESCAPETYPE has also new entries:
19+
// status codes used in NDIS 6.51
20
//
21-
typedef enum _D3DKMT_ESCAPETYPE
21+
#if NDIS_SUPPORT_NDIS651
22-
{
22+
#define NDIS_STATUS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE  ((NDIS_STATUS)STATUS_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE)
23-
    D3DKMT_ESCAPE_HMD_GET_EDID_BASE_BLOCK    = 26,
23+
#define NDIS_STATUS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE     ((NDIS_STATUS)STATUS_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE)
24-
    D3DKMT_ESCAPE_WIN32K_HMD_ENUM                   = 1032,
24+
#define NDIS_STATUS_DOT11_AP_CHANNEL_NOT_ALLOWED              ((NDIS_STATUS)STATUS_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED)
25-
    D3DKMT_ESCAPE_WIN32K_HMD_CONTROL                = 1033
25+
#define NDIS_STATUS_DOT11_AP_BAND_NOT_ALLOWED                 ((NDIS_STATUS)STATUS_NDIS_DOT11_AP_BAND_NOT_ALLOWED)
26-
} D3DKMT_ESCAPETYPE;
26+
#endif
27
28-
////
28+
29
#if (NDIS_SUPPORT_NDIS650)
30-
new structs:
30+
31
#pragma warning(push)
32-
typedef struct _D3DKMT_SETHWPROTECTIONTEARDOWNRECOVERY
32+
#pragma warning(disable:4201) // (nonstandard extension used : nameless struct/union)
33-
{
33+
#pragma warning(disable:4214) // (extension used : bit field types other than int)
34-
    D3DKMT_HANDLE                   hAdapter;      // in: adapter handle
34+
35-
    BOOL                            Recovered;     // in: HW protection teardown recovery
35+
//
// Opaque forward declaration of a PacketDirect queue; the full layout is
// declared later in this header (struct _NDIS_PD_QUEUE).
//
typedef struct _NDIS_PD_QUEUE NDIS_PD_QUEUE;

// Opaque handles to PD counter and PD filter objects created by PD clients.
DECLARE_HANDLE(NDIS_PD_COUNTER_HANDLE);
DECLARE_HANDLE(NDIS_PD_FILTER_HANDLE);

//
// Device-visible (DMA) address of a data buffer. A PHYSICAL_ADDRESS in name,
// but per the PD_BUFFER description below it is a logical address valid for
// all PD-capable adapters in the same IOMMU domain.
//
typedef PHYSICAL_ADDRESS DMA_LOGICAL_ADDRESS;
40-
    D3DKMT_HANDLE              hAdapter;
40+
41-
} D3DKMT_FLUSHHEAPTRANSITIONS;
41+
//
// Per-packet IEEE 802.1Q tag information. The bit widths match the 802.1Q
// TCI layout: 3-bit priority (PCP), 1-bit CFI/DEI, 12-bit VLAN identifier.
//
typedef struct _PD_BUFFER_8021Q_INFO {
    UINT16 UserPriority:3;      // 802.1p priority code point
    UINT16 CanonicalFormatId:1; // CFI/DEI bit
    UINT16 VlanId:12;           // VLAN identifier (0 = untagged/priority-tagged)
} PD_BUFFER_8021Q_INFO;
46
47-
typedef enum
47+
//
// Per-packet virtual subnet (e.g. NVGRE/VXLAN VSID) information.
// Only the low 24 bits carry the subnet id; the top 8 bits are reserved.
//
typedef struct _PD_BUFFER_VIRTUAL_SUBNET_INFO {
    UINT32 VirtualSubnetId:24; // virtual subnet identifier
    UINT32 Reserved:8;         // reserved, must be zero -- TODO confirm against provider docs
} PD_BUFFER_VIRTUAL_SUBNET_INFO;
51
52-
///
52+
//
// If an L2 packet is represented by multiple PD_BUFFERs, the first PD_BUFFER
// must have the PD_BUFFER_FLAG_PARTIAL_PACKET_HEAD flag set and the
// NextPartialPDBuffer field must point to the partial PD_BUFFERs that
// constitute the whole packet. Each of the partial PD_BUFFERs must point to
// the next partial PD_BUFFER by using the NextPartialPDBuffer as opposed to
// the NextPDBuffer field which must be NULL in all partial PD_BUFFERs except
// for the head buffer. All partial PD_BUFFERs except for the head buffer must
// have the PD_BUFFER_FLAG_PARTIAL_PACKET_HEAD cleared. The last partial
// PD_BUFFER must have its NextPartialPDBuffer field set to NULL. The total
// length of the L2 packet is the sum of DataLength fields from each partial
// PD_BUFFER. The head PD_BUFFER must contain up to and including the IP
// transport (TCP, UDP, SCTP, etc) header. In the case of encap or double-encap,
// the innermost IP transport header must be contained in the head PD_BUFFER.
//
#define PD_BUFFER_FLAG_PARTIAL_PACKET_HEAD  0x00000001
68
69
//
// A PD_BUFFER allocated with its own accompanying data buffer will have
// this attribute set. PD_BUFFER attributes must never be modified by PD
// clients or PD providers.
//
#define PD_BUFFER_ATTR_BUILT_IN_DATA_BUFFER 0x00000001
75
76
//
// All PD_BUFFERs posted to an RX queue will have the DataStart field
// set to a value >= PD_BUFFER_MIN_RX_DATA_START_VALUE by the PD client.
// While a PD_BUFFER is pending in an RX queue, PD provider can use the
// portion of the data buffer between DataBufferDmaLogicalAddress and
// (DataBufferDmaLogicalAddress + DataStart) for its own purpose. As soon as
// a PD_BUFFER is drained by the PD client from the RX queue, PD client can
// use the same portion of data buffer for its own purpose. A corollary of
// this is that neither the PD client nor the PD provider can expect the
// contents of this portion of the data buffer to be preserved as soon
// as the control of the PD_BUFFER is transferred. A PD_BUFFER is under
// the control of a PD provider when the PD_BUFFER is sitting in
// a queue owned by that PD provider (i.e., posted to the queue, and has not
// yet been drained out from the queue by the PD client). Otherwise, the
// PD_BUFFER is under the control of the PD client.
//
#define PD_BUFFER_MIN_RX_DATA_START_VALUE 32
93
94
//
// All PD_BUFFERs posted to an RX queue will satisfy the following
// alignment requirement (note: the constant must be a power of two for
// the mask expression below to be valid):
//
// ((DataBufferDmaLogicalAddress + DataStart)
//     & (PD_BUFFER_MIN_RX_DATA_START_ALIGNMENT - 1)) == 0
//
#define PD_BUFFER_MIN_RX_DATA_START_ALIGNMENT 2
102
103
//
// All PD_BUFFERs posted to a TX queue will satisfy the following
// alignment requirement (note: the constant must be a power of two for
// the mask expression below to be valid):
//
// ((DataBufferDmaLogicalAddress + DataStart)
//     & (PD_BUFFER_MIN_TX_DATA_START_ALIGNMENT - 1)) == 0
//
#define PD_BUFFER_MIN_TX_DATA_START_ALIGNMENT 2
111
112
//
// PD_BUFFER structure represents a PD packet (or a portion of a PD packet).
// The actual memory location which is used for storing the packet data is
// indicated by the DataBufferDmaLogicalAddress and DataBufferVirtualAddress
// fields. The former field represents the address which the PD provider
// must use for DMA. The latter field represents the address which host
// software can use to access/modify the packet contents.
// The DataBufferDmaLogicalAddress is valid for *all* PD-capable NDIS miniport
// adapters in the same IOMMU domain.
//
typedef struct _PD_BUFFER {

    // Links the next PD_BUFFER in a post/drain list. NULL terminates the list.
    struct _PD_BUFFER* NextPDBuffer;

    // Links the next partial PD_BUFFER of the same L2 packet; see the
    // PD_BUFFER_FLAG_PARTIAL_PACKET_HEAD description above.
    struct _PD_BUFFER* NextPartialPDBuffer;

    //
    // Reserved for PD client use. PD provider must never modify this field.
    //
    PVOID PDClientReserved;

    //
    // Neither PD client nor PD provider is allowed to modify this field.
    // If PD client has allocated the PD_BUFFER with a non-zero value for
    // ClientContextSize, PDClientContext refers to a buffer of size
    // ClientContextSize. Otherwise, this field is NULL.
    //
    _Field_size_bytes_(PDClientContextSize) PVOID PDClientContext;

    //
    // This field denotes the original virtual address of the allocated data
    // buffer. The actual packet data is always at
    // DataBufferVirtualAddress+DataStart. Neither the PD provider nor the
    // PD platform ever modify the value of this field after PD_BUFFER
    // initialization.
    //
    _Field_size_bytes_(DataBufferSize)
    PUCHAR DataBufferVirtualAddress;

    //
    // This field denotes the original DMA logical address of the allocated
    // data buffer. The actual packet data is always at
    // DataBufferDmaLogicalAddress+DataStart. Neither the PD provider nor the
    // PD platform ever modify the value of this field after PD_BUFFER
    // initialization.
    //
    _Field_size_bytes_(DataBufferSize)
    DMA_LOGICAL_ADDRESS DataBufferDmaLogicalAddress;

    //
    // This is the total size of the allocated data buffer. Neither the PD
    // provider nor the PD platform ever modify the value of this field
    // after PD_BUFFER initialization. This is a ULONG (as opposed to a
    // USHORT) mainly because of LSO.
    //
    ULONG DataBufferSize;

    //
    // If non-zero, this is the size of the buffer pointed by PDClientContext.
    // The value of this field must never be modified except by the PD platform.
    // The PD platform does NOT change the value of this field after PD_BUFFER
    // allocation/initialization.
    //
    USHORT PDClientContextSize;

    //
    // See PD_BUFFER_ATTR_XXX. Must never be modified by PD provider.
    //
    USHORT Attributes;

    //
    // See PD_BUFFER_FLAG_XXX
    //
    USHORT Flags;

    //
    // DataStart denotes where the packet starts relative to the original
    // starting address of the allocated data buffer. PD provider never
    // modifies this field. PD provider adds this value to the
    // DataBufferDmaLogicalAddress value in order to derive the actual
    // target DMA address for packet reception/transmission. I.e., the
    // target DMA address value in the hardware receive/transmit descriptor
    // must be set to DataBufferDmaLogicalAddress+DataStart when a PD_BUFFER
    // is posted to a receive/transmit queue.
    //
    USHORT DataStart;

    //
    // When posting PD_BUFFERs to receive queues, DataLength is ignored by
    // the PD provider (see ReceiveDataLength description in PD queue creation).
    // When draining completed PD_BUFFERs from receive queues,
    // the PD provider stores the length of the received packet in the
    // DataLength field. The length does not include FCS or any stripped 802.1Q
    // headers.
    // When posting PD_BUFFERs to transmit queues, DataLength denotes the length
    // of the packet to be sent. When draining completed PD_BUFFERs from
    // transmit queues, the PD provider leaves DataLength field unmodified.
    //
    ULONG DataLength;

    union {

        struct {
            union {
                //
                // PD provider sets this to the filter context value obtained
                // from the matched filter that steered the packet to the receive
                // queue. Filter context values are specified by the PD clients
                // when configuring filters.
                //
                ULONG64 RxFilterContext;

                //
                // If one of the RxGftxxx bits are set, RxFilterContext value may
                // be used for GFT flow entry Id value.
                //
                ULONG64 GftFlowEntryId;
            };

            //
            // The hash value computed for the incoming packet
            // that is steered to the receive queue via RSS.
            //
            ULONG RxHashValue;

            //
            // Commonly used RX offload fields
            //
            union {
                struct {
                    ULONG RxIPHeaderChecksumSucceeded:1;
                    ULONG RxTCPChecksumSucceeded:1;
                    ULONG RxUDPChecksumSucceeded:1;
                    ULONG RxIPHeaderChecksumFailed:1;
                    ULONG RxTCPChecksumFailed:1;
                    ULONG RxUDPChecksumFailed:1;
                    ULONG RxHashComputed:1;
                    ULONG RxHashWithL4PortNumbers:1;
                    ULONG RxGftDirectionIngress:1;
                    ULONG RxGftExceptionPacket:1;
                    ULONG RxGftCopyPacket:1;
                    ULONG RxGftSamplePacket:1;
                    ULONG RxReserved1:4;
                    ULONG RxCoalescedSegCount:16; // RSC
                    ULONG RxRscTcpTimestampDelta; // RSC
                };
                ULONG RxOffloads[2];
            };

            //
            // Commonly used TX offload fields
            //
            union {
                struct {
                    ULONG TxIsIPv4:1; // Checksum, LSO
                    ULONG TxIsIPv6:1; // Checksum, LSO
                    ULONG TxTransportHeaderOffset:10; // Checksum, LSO, NVGRE
                    ULONG TxMSS:20; // LSO
                    ULONG TxComputeIPHeaderChecksum:1;
                    ULONG TxComputeTCPChecksum:1;
                    ULONG TxComputeUDPChecksum:1;
                    ULONG TxIsEncapsulatedPacket:1;
                    ULONG TxInnerPacketOffsetsValid:1;
                    ULONG TxReserved1:11;
                    ULONG TxInnerFrameOffset:8;
                    ULONG TxInnerIpHeaderRelativeOffset:6;
                    ULONG TxInnerIsIPv6:1;
                    ULONG TxInnerTcpOptionsPresent:1;
                };
                ULONG TxOffloads[2];
            };

            //
            // Other Meta Data
            //
            PD_BUFFER_VIRTUAL_SUBNET_INFO VirtualSubnetInfo;
            PD_BUFFER_8021Q_INFO Ieee8021qInfo;
            USHORT GftSourceVPortId;

            ULONG Reserved;

            //
            // A scratch field which the PD provider can use for its own
            // purposes while the PD_BUFFER is sitting in the provider
            // queue (i.e., posted by the client but not yet drained back
            // by the client). Once the PD_BUFFER is drained by the
            // client, there's no guarantee that the contents of this field
            // is going to be preserved.
            //
            UINT64 ProviderScratch;
        } MetaDataV0;
    };
} PD_BUFFER;
304
305
#pragma warning(pop)
306
307
//
308
// A NDIS_PD_QUEUE provides the following abstraction:
309
// The queue is a circular array of N slots.
310
// Overall state of the queue is tracked by 3 conceptual pointers: head (H),
311
// tail (T), and completed (C). H points to the slot which contains the next
312
// item that must be processed. T points to the slot which the next item
313
// posted by the client must be placed into. If H == T, the queue is empty.
314
// If H == (T+1)%N, the queue is full. Hence, the queue can hold at most N-1
315
// items. That is, NDIS_PD_QUEUE_PARAMETERS.QueueSize is N-1.
316
// C points to the slot which contains the earliest completed item
317
// which has not yet been drained by the client. If C == H, the queue has no
318
// items in completed state.
319
// All slots are initially empty, that is, H, T, and C all point to the same
320
// slot. All items posted to the queue should typically be processed and
321
// completed by the provider in the order they were posted but the PD client
322
// must not take a dependency on completions being strictly ordered. As the
323
// queue progresses, it will always have 0 or more empty slots followed by 0
324
// or more completed slots followed by 0 or more busy slots in a circular
325
// fashion. A "completed slot" refers to a slot which contains a completed item.
326
// Taking the simple case where no wrap-around has occurred yet, C <= H <= T
327
// will always hold. Factoring in wrap-around, the following formulas will
328
// hold true at any point in time:
329
// The number of busy slots : (T-H)%N
330
// The number of completed slots : (H-C)%N
331
// The number of empty slots : (C-T-1)%N
332
//
333
334
//
335
// All of the NDIS_PD_QUEUE_DISPATCH routines are guaranteed to be invoked by
336
// the caller in serialized fashion on the same PD queue. The implementation of
337
// these routines MUST avoid acquiring any locks or performing interlocked
338
// operations (since the caller is responsible for dealing with concurrency,
339
// not the provider, hence the provider must not use any synchronization
340
// routines/primitives; if there's any need for synchronization in the core
341
// transmit/receive code path in the PD provider, we must know about it and
342
// do whatever necessary to remove that need.)
343
//
344
345
//
346
// PostAndDrain is the main data path function which allows posting PD_BUFFERs
347
// to PD transmit/receive queues and draining any previously posted
348
// PD buffers that have been completed. Provider removes buffers from the
349
// PostBufferList and places them into the queue, starting with the head
350
// buffer in the list and advancing to the next buffer until either the
351
// PostBufferList gets empty or the queue is full (or near full).
352
// Provider advances the PostListHead and returns the new list head to the
353
// caller. Provider also removes any completed buffers from the queue and
354
// inserts them to tail of the DrainBufferList and returns the new
355
// DrainBufferList tail to the client. Note that the provider should drain
356
// as many buffers as it can in order to open up room for the buffers being
357
// posted. The PostBufferList and DrainBufferList are guaranteed to be disjoint
358
// buffer lists (i.e., the PD client never provides the head of a buffer list
359
// as the PostBufferListHead and the tail of that same list as
360
// DrainBufferListTail). The provider must ensure that it never drains more than
361
// MaxDrainCount packets (a set of partial PD_BUFFERs that make up a single
362
// L2 packet count as 1). The PD client can pass an empty list via
363
// PostBufferListHead in order to just drain completed buffers without posting
364
// any new buffers. The PD client can pass 0 for MaxDrainCount in order to
365
// just post new buffers without draining any completed buffers. In rare cases,
366
// client may invoke the call with both an empty PostBufferList and 0 for
367
// MaxDrainCount, so the provider must NOT assume otherwise, and handle such
368
// a call properly (i.e., a no-op).
369
//
370
// An example code snippet illustrates the abundant pointer indirections
371
// best:
372
//
373
//      PD_BUFFER* PostHead = NULL;
374
//      PD_BUFFER** PostTail = &PostHead;
375
//      PD_BUFFER* DrainHead = NULL;
376
//      PD_BUFFER** DrainTail = &DrainHead;
377
//
378
//      PD_BUFFER* bufX = <allocated PD_BUFFER>;
379
//
380
//      bufX->NextPDBuffer = NULL;
381
//      *PostTail = bufX;
382
//      PostTail = &bufX->NextPDBuffer;
383
//
384
//      // Assume 20 PD_BUFFERs are present in the Post list just like
385
//      // bufX. Assume bufY is the 10th buffer and bufZ is the last buffer
386
//      // in Post list. Assume there are many previously posted buffers in
387
//      // the Queue and 5 of them are currently completed: buf1, ..., buf5.
388
//      // Assume the provider is able to accept only 9 buffers from the Post
389
//      // list and drains all 5 of the completed buffers. With these
390
//      // assumptions, the state of the Post and Drain lists before and
391
//      // after the following call returns is:
392
//
393
//      // BEFORE:
394
//      // PostHead == bufX
395
//      // PostTail == &bufZ->NextPDBuffer
396
//      // DrainHead == NULL
397
//      // DrainTail == &DrainHead
398
//
399
//      NDIS_PD_POST_AND_DRAIN_BUFFER_LIST(
400
//          Queue,
401
//          &PostHead,
402
//          &DrainTail,
403
//          32);
404
//
405
//      // AFTER:
406
//      // PostHead == bufY
407
//      // PostTail == &bufZ->NextPDBuffer
408
//      // DrainHead == buf1
409
//      // DrainTail == &buf5->NextPDBuffer
410
//
411
//
// Function type for the provider's combined post/drain data-path routine;
// see the PostAndDrain description above for the full contract. On return,
// *PostBufferListHead points to the first buffer NOT accepted by the
// provider, and *DrainBufferListTail has been advanced past any completed
// buffers appended to the drain list.
//
typedef
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_max_(DISPATCH_LEVEL)
_IRQL_requires_same_
_Function_class_(NDIS_PD_POST_AND_DRAIN_BUFFER_LIST)
VOID
(NDIS_PD_POST_AND_DRAIN_BUFFER_LIST)(
    _Inout_ NDIS_PD_QUEUE* Queue,
    _Inout_ PD_BUFFER** PostBufferListHead,
    _Inout_ PD_BUFFER*** DrainBufferListTail,
    _In_ ULONG MaxDrainCount
    );

// Pointer-to-function alias stored in NDIS_PD_QUEUE_DISPATCH.
typedef NDIS_PD_POST_AND_DRAIN_BUFFER_LIST
    *NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_HANDLER;
426
427
//
// PostAndDrainEx is exactly the same as PostAndDrain with a few additional
// output parameters:
// *QueueDepthThresholdReached is set to TRUE by the provider if the queue
// depth (as explained in the NDIS_PD_QUERY_QUEUE_DEPTH function) is below
// the threshold value set by the client for a receive queue or is above
// the threshold value set by the client for a transmit queue; otherwise,
// PD provider sets it to FALSE.
// *DrainCount is set to the number of PD_BUFFERs that the provider has
// appended to the DrainBufferList. A set of partial PD_BUFFERs that
// make up a single L2 packet is counted as 1.
// *DrainCount is always <= MaxDrainCount.
// *PostCount is set to the number of PD_BUFFERs that the provider has
// removed from the PostBufferList. A set of partial PD_BUFFERs that
// make up a single L2 packet is counted as 1.
//
typedef struct _NDIS_PD_POST_AND_DRAIN_ARG {
    _Inout_ PD_BUFFER* PostBufferListHead;
    _Inout_ PD_BUFFER** DrainBufferListTail;
    _In_ ULONG MaxDrainCount;
    _Out_ ULONG DrainCount;
    _Out_ ULONG PostCount;
    _Out_ BOOLEAN QueueDepthThresholdReached;
} NDIS_PD_POST_AND_DRAIN_ARG;
451
452
//
// Function type for the extended post/drain routine. Identical in behavior
// to NDIS_PD_POST_AND_DRAIN_BUFFER_LIST but takes its in/out parameters
// bundled in an NDIS_PD_POST_AND_DRAIN_ARG (see above).
//
typedef
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_max_(DISPATCH_LEVEL)
_IRQL_requires_same_
_Function_class_(NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_EX)
VOID
(NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_EX)(
    _Inout_ NDIS_PD_QUEUE* Queue,
    _Inout_ NDIS_PD_POST_AND_DRAIN_ARG* Arg
    );

// Pointer-to-function alias stored in NDIS_PD_QUEUE_DISPATCH.
typedef NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_EX
    *NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_EX_HANDLER;
465
466
//
// This routine ensures that any item that's not yet in completed state
// in the queue will be completed imminently. The caller is responsible for
// waiting for and draining all previously posted requests before closing
// the queue. The caller must not post any further PD buffers to the queue
// after this call (i.e., the queue is not usable for transmitting/receiving
// packets any more via PD). The primary use case for this routine is flushing
// the receive queues, i.e., if there's no incoming traffic, posted buffers
// will sit in the receive queue indefinitely, but we need to drain the
// queue before we can close it, hence we need to flush it first. The same
// issue does not exist for transmit queues in practice since transmit requests
// will not pend indefinitely, but providers must honor the flush call for
// transmit queues as well anyway (which may be a no-op if the provider knows
// that the pending transmit request will complete very soon anyway, which is
// the typical case except for L2 flow control possibly).
//
typedef
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_max_(DISPATCH_LEVEL)
_IRQL_requires_same_
_Function_class_(NDIS_PD_FLUSH_QUEUE)
VOID
(NDIS_PD_FLUSH_QUEUE)(
    _Inout_ NDIS_PD_QUEUE* Queue
    );

// Pointer-to-function alias stored in NDIS_PD_QUEUE_DISPATCH.
typedef NDIS_PD_FLUSH_QUEUE *NDIS_PD_FLUSH_QUEUE_HANDLER;
493
494
//
// Queue depth query returns the number of PD_BUFFERs that are posted
// to a NDIS_PD_QUEUE but are not yet being processed by the provider.
// On a receive queue, this is the number of PD_BUFFERs that are available
// for placing incoming packets into. If 10 buffers were posted, and 3 of
// them have already been fetched by the provider for DMA'ing incoming
// packets into, the query should return 7 (not 10). On a transmit queue,
// this is the number of PD_BUFFERs which the provider has not yet fetched
// from the queue for transmitting.
// Ability to monitor the queue depth is very important for
// PD clients in order to assess congestion build-up and take precautionary
// action. An increasing queue depth for a TX queue is a sign of increasing
// congestion on the outbound link. A decreasing queue depth for a RX queue
// is a sign of the PD client not being able to process incoming packets
// fast enough on the inbound link. PD clients may need to monitor the
// queue depth status in a very fine grain fashion (i.e., find out if the queue
// depth has reached a certain level during each Post-And-Drain operation).
// Such PD clients use the PostAndDrainEx call. PD clients may also need to
// query the actual queue depth (in contrast to whether the queue depth is
// above or below a certain level) as well, which the
// NDIS_PD_QUERY_QUEUE_DEPTH function is
// for. Performance of PostAndDrainEx implementation is very critical for PD
// clients. NDIS_PD_QUERY_QUEUE_DEPTH, while also very important, is not
// expected to be used by the PD clients with the same frequency as the
// PostAndDrain or PostAndDrainEx operations, hence it is acceptable for
// NDIS_PD_QUERY_QUEUE_DEPTH to be more expensive in exchange for returning the
// actual queue depth (in contrast to whether or not queue depth is above/below
// a given threshold). Note that it is understood by PD clients that the
// actual queue depth in the hw may have already changed and the returned queue
// depth may not precisely reflect it. As long as there isn't a too large lag
// between the real and returned queue depths and the lag itself is reasonably
// stable (for a stable incoming traffic rate), this information is still useful
// for the PD clients.
//
typedef
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_max_(DISPATCH_LEVEL)
_IRQL_requires_same_
_Function_class_(NDIS_PD_QUERY_QUEUE_DEPTH)
VOID
(NDIS_PD_QUERY_QUEUE_DEPTH)(
    _In_ CONST NDIS_PD_QUEUE* Queue,
    _Out_ ULONG64* Depth
    );

// Pointer-to-function alias stored in NDIS_PD_QUEUE_DISPATCH.
typedef NDIS_PD_QUERY_QUEUE_DEPTH *NDIS_PD_QUERY_QUEUE_DEPTH_HANDLER;
539
540
//
// Table of the provider's per-queue entry points. A PD client invokes the
// queue's data-path operations through this dispatch table; all routines on
// the same queue are called serialized (see the note on serialization above).
//
typedef struct _NDIS_PD_QUEUE_DISPATCH {
    //
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
    // Header.Revision = NDIS_PD_QUEUE_DISPATCH_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_QUEUE_DISPATCH_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags; // Reserved. Must be set to 0.
    NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_HANDLER PDPostAndDrainBufferList;
    NDIS_PD_QUERY_QUEUE_DEPTH_HANDLER PDQueryQueueDepth;
    NDIS_PD_FLUSH_QUEUE_HANDLER PDFlushQueue;
    NDIS_PD_POST_AND_DRAIN_BUFFER_LIST_EX_HANDLER PDPostAndDrainBufferListEx;
} NDIS_PD_QUEUE_DISPATCH;

// Revision-1 identifier and the matching minimum structure size
// (size through the last revision-1 field).
#define NDIS_PD_QUEUE_DISPATCH_REVISION_1 1
#define NDIS_SIZEOF_PD_QUEUE_DISPATCH_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_QUEUE_DISPATCH, PDPostAndDrainBufferListEx)
557
558
//
// Public portion of a PD transmit or receive queue. The provider exposes its
// entry points through Dispatch; the reserved pointer arrays are owned by the
// PD platform and PD client respectively and must not be touched by the other
// parties.
//
typedef struct _NDIS_PD_QUEUE {
    //
    // Header.Type = NDIS_OBJECT_TYPE_PD_TRANSMIT_QUEUE or NDIS_OBJECT_TYPE_PD_RECEIVE_QUEUE;
    // Header.Revision = NDIS_PD_QUEUE_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_QUEUE_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags; // Reserved. Must be set to 0.
    CONST NDIS_PD_QUEUE_DISPATCH* Dispatch;
    PVOID PDPlatformReserved[2];
    PVOID PDClientReserved[2];
} NDIS_PD_QUEUE;

// Revision-1 identifier and the matching minimum structure size
// (size through the last revision-1 field).
#define NDIS_PD_QUEUE_REVISION_1 1
#define NDIS_SIZEOF_PD_QUEUE_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_QUEUE, PDClientReserved)
574
575
//
576
// PD and NIC SWITCH COEXISTENCE:
577
// Unless otherwise is explicitly noted for a specific PD OID, all
578
// PD OID requests are VPort specific when there's a NIC switch. That is,
579
// PD queues are created, PD filters are set, PD counters are created all
580
// per Vport. When there's no NIC switch, PD client uses NDIS_DEFAULT_VPORT_ID,
581
// which must be handled by the PD provider as if the scope of the
582
// OID request is the whole miniport adapter.
583
//
584
585
//
// Direction of a PD queue, as specified in NDIS_PD_QUEUE_PARAMETERS.
//
typedef enum {
    PDQueueTypeUnknown,  // not a valid queue type; catches zero-initialized params
    PDQueueTypeReceive,  // RX queue
    PDQueueTypeTransmit, // TX queue
    PDQueueTypeMax       // sentinel; must remain last
} NDIS_PD_QUEUE_TYPE;
591
592
//
593
// QueueSize is the maximum number of PD_BUFFERs that can be posted to the
594
// queue and it is always equal to a number of the form (2^K)-1 (e.g., 63,
595
// 127, 255, 511, 1023, etc). This lends itself to efficient circular index
596
// arithmetic (i.e., can use "&" as opposed to "%" for index wrap-around).
597
//
598
// For receive queues, ReceiveDataLength denotes the minimum length of
599
// the data buffers that will be posted to the receive queue, i.e.,
600
// (PD_BUFFER.DataBufferSize - PD_BUFFER.DataStart) >= ReceiveDataLength for
601
// all PD_BUFFERs posted to the receive queue.
602
//
603
// Affinity is a hint to PD provider for performance optimization. PD platform
604
// will primarily be processing the queue on procs indicated by the Affinity
605
// mask. But, note that this is NOT a strict requirement on the PD platform.
606
// Hence the PD provider must NOT use the Affinity info for anything other
607
// than performance optimization. For example, PD provider should use the
608
// Affinity for direct access to the proper cache for packet DMA, if supported
609
// by the underlying hardware platform.
610
//
611
612
//
613
//
// If this flag is set in the NDIS_PD_QUEUE_PARAMETERS Flags field, the provider
// must be prepared to handle drain notification requests from the PD client
// on the particular queue being allocated. This flag is valid for both RX and
// TX queues. After the PD client requests a drain notification, it must issue
// another PDPostAndDrainBufferList before waiting for a notification. This
// drain request right after the arm request ensures that any items that might
// have been completed after the previous PDPostAndDrainBufferList request but
// right before the arm request are drained back by the client before starting
// to wait for a notification which may never come unless new items are
// completed. This "poll/arm/poll" model removes the synchronization burden
// from the provider and puts it in the PD client/platform.
//
#define NDIS_PD_QUEUE_FLAG_DRAIN_NOTIFICATION 0x00000001

//
// A PD provider sets this value in the NDIS_PD_QUEUE_PARAMETERS TrafficClassId
// field when returning a PD Rx queue via NdisPDAcquireReceiveQueues if the
// PD provider can handle multiple traffic classes via a single Rx queue for
// a given RSS target processor.
//
#define NDIS_PD_INVALID_TRAFFIC_CLASS_ID ((ULONG)-1)
634
635
//
// Parameters for allocating a PD queue via NDIS_PD_ALLOCATE_QUEUE, also
// returned by NDIS_PD_ACQUIRE_RECEIVE_QUEUES to describe each acquired queue.
//
typedef struct DECLSPEC_ALIGN(8) _NDIS_PD_QUEUE_PARAMETERS {
    //
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
    // Header.Revision = NDIS_PD_QUEUE_PARAMETERS_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_QUEUE_PARAMETERS_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags;
    NDIS_PD_QUEUE_TYPE QueueType; // transmit or receive
    ULONG QueueSize; // 2^K-1
    ULONG ReceiveDataLength; // min PD_BUFFER data length
    GROUP_AFFINITY Affinity;
    //
    // TrafficClassId (as defined in NDIS_QOS_PARAMETERS) for the PD queue.
    //
    ULONG TrafficClassId;
    //
    // For transmit queues, this is the maximum number of partial PD_BUFFERs
    // that the client is allowed to chain together to form a single L2 packet.
    // This must be less than or equal to the MaximumTxPartialBufferCount value
    // in NDIS_PD_CAPABILITIES.
    //
    // For receive queues, this is the maximum number of partial PD_BUFFERs
    // that the provider is allowed to chain together to form a single
    // large L2 packet with RSC. This must be less than or equal to the
    // MaximumRxPartialBufferCount value in NDIS_PD_CAPABILITIES.
    // Note that PD client never posts PD_BUFFERs with the partial flag to the
    // receive queue. PD client is always required to post PD_BUFFERs with at
    // least MTU-size space (starting from the DataStart position). Provider
    // performs chaining only in the case of RSC. Some providers may not be
    // able to support RSC chaining. Such providers advertise a value of 1
    // via MaximumRxPartialBufferCount in NDIS_PD_CAPABILITIES.
    // If a PD client wants to still use RSC over such a provider, PD client
    // must post large enough PD_BUFFERs.
    //
    ULONG MaximumPartialBufferCount;
    //
    // During PD queue creation, PD client can optionally provide a counter
    // handle. If a counter handle is provided, depending on the queue type,
    // PD provider must update the counter values as activity occurs on the
    // PD queue. This is a handle to a PD transmit queue counter for transmit
    // queues and PD receive queue counter for receive queues. PD client is
    // responsible for closing the counter handle only after the PD queue is
    // closed.
    //
    // For PD queues obtained via NDIS_PD_ACQUIRE_RECEIVE_QUEUES,
    // PD provider MUST return a dedicated PD receive queue counter
    // for each returned receive queue.
    //
    NDIS_PD_COUNTER_HANDLE CounterHandle;
} NDIS_PD_QUEUE_PARAMETERS;

#define NDIS_PD_QUEUE_PARAMETERS_REVISION_1 1

#define NDIS_SIZEOF_PD_QUEUE_PARAMETERS_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_QUEUE_PARAMETERS, CounterHandle)
//
// If this flag is set in the NDIS_PD_ACQUIRE_QUEUES_PARAMETERS Flags field, the
// provider must be prepared to handle drain notification requests from the PD
// client on each of the individual RX queues returned.
//
#define NDIS_PD_ACQUIRE_QUEUES_FLAG_DRAIN_NOTIFICATION 0x00000001
//
// Parameters passed to NDIS_PD_ACQUIRE_RECEIVE_QUEUES.
//
typedef struct DECLSPEC_ALIGN(8) _NDIS_PD_ACQUIRE_QUEUES_PARAMETERS {
    //
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
    // Header.Revision = NDIS_PD_ACQUIRE_QUEUES_PARAMETERS_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_ACQUIRE_QUEUES_PARAMETERS_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags;
} NDIS_PD_ACQUIRE_QUEUES_PARAMETERS;

#define NDIS_PD_ACQUIRE_QUEUES_PARAMETERS_REVISION_1 1
#define NDIS_SIZEOF_PD_ACQUIRE_QUEUES_PARAMETERS_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_ACQUIRE_QUEUES_PARAMETERS, Flags)
//
// Identifies the kind of activity a PD counter object tracks; set in
// NDIS_PD_COUNTER_PARAMETERS when allocating the counter.
//
typedef enum {
    PDCounterTypeUnknown,
    PDCounterTypeReceiveQueue,
    PDCounterTypeTransmitQueue,
    PDCounterTypeReceiveFilter,
    PDCounterTypeMax
} NDIS_PD_COUNTER_TYPE;
typedef struct _NDIS_PD_RECEIVE_QUEUE_COUNTER {
722
    ULONG64 PacketsReceived;
723
    ULONG64 BytesReceived;
724
    ULONG64 PacketsDropped;
725
} NDIS_PD_RECEIVE_QUEUE_COUNTER;
726
727
typedef struct _NDIS_PD_TRANSMIT_QUEUE_COUNTER {
728
    ULONG64 PacketsTransmitted;
729
    ULONG64 BytesTransmitted;
730
} NDIS_PD_TRANSMIT_QUEUE_COUNTER;
731
732
typedef struct _NDIS_PD_FILTER_COUNTER {
733
    ULONG64 PacketsMatched;
734
    ULONG64 BytesMatched;
735
} NDIS_PD_FILTER_COUNTER;
736
737
typedef union _NDIS_PD_COUNTER_VALUE {
738
    NDIS_PD_RECEIVE_QUEUE_COUNTER ReceiveQueue;
739
    NDIS_PD_TRANSMIT_QUEUE_COUNTER TransmitQueue;
740
    NDIS_PD_FILTER_COUNTER Filter;
741
} NDIS_PD_COUNTER_VALUE, *PNDIS_PD_COUNTER_VALUE;
742
743
typedef struct _NDIS_PD_COUNTER_PARAMETERS {
744
    //
745
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
746
    // Header.Revision = NDIS_PD_COUNTER_PARAMETERS_REVISION_1;
747
    // Header.Size >= NDIS_SIZEOF_PD_COUNTER_PARAMETERS_REVISION_1;
748
    //
749
    NDIS_OBJECT_HEADER Header;
750
    ULONG Flags; // Reserved. Must be set to 0 by client, ignored by provider
751
    //
752
    // CounterName is ignored by the PD provider. It is used by the PD platform
753
    // for publishing the counter to Windows Performance Counter subsystem (so
754
    // that the counter can be viewed via PerfMon and accessed by system APIs
755
    // programmatically).
756
    //
757
    PCWSTR CounterName;
758
    NDIS_PD_COUNTER_TYPE Type;
759
} NDIS_PD_COUNTER_PARAMETERS;
760
761
#define NDIS_PD_COUNTER_PARAMETERS_REVISION_1 1
762
#define NDIS_SIZEOF_PD_COUNTER_PARAMETERS_REVISION_1 \
763
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_COUNTER_PARAMETERS, Type)
764
765
//
// Parameters passed to NDIS_PD_SET_RECEIVE_FILTER.
//
typedef struct DECLSPEC_ALIGN(8) _NDIS_PD_FILTER_PARAMETERS {
    //
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
    // Header.Revision = NDIS_PD_FILTER_PARAMETERS_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_FILTER_PARAMETERS_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags; // Reserved. Must be set to 0 by client, ignored by provider
    NDIS_GFP_PROFILE_ID MatchProfileId;
    ULONG Priority;
    NDIS_PD_COUNTER_HANDLE CounterHandle;
    NDIS_PD_QUEUE* TargetReceiveQueue;
    ULONG64 RxFilterContext;
    //
    // The following fields are used to describe an array of either
    // NDIS_GFP_HEADER_GROUP_EXACT_MATCH or NDIS_GFP_HEADER_GROUP_WILDCARD_MATCH
    // structures (determined by the MatchProfileId)
    //
    _Field_size_bytes_(HeaderGroupMatchArrayTotalSize)
    PUCHAR HeaderGroupMatchArray; // must be 8-byte aligned
    _Field_range_(0, MAXULONG/HeaderGroupMatchArrayElementSize)
    ULONG HeaderGroupMatchArrayNumElements;
    ULONG HeaderGroupMatchArrayElementSize;
    _Field_range_((HeaderGroupMatchArrayNumElements*HeaderGroupMatchArrayElementSize), MAXULONG)
    ULONG HeaderGroupMatchArrayTotalSize;
} NDIS_PD_FILTER_PARAMETERS;

#define NDIS_PD_FILTER_PARAMETERS_REVISION_1 1
#define NDIS_SIZEOF_PD_FILTER_PARAMETERS_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_FILTER_PARAMETERS, HeaderGroupMatchArrayTotalSize)
//
// Opaque handle to a PD provider.
//
DECLARE_HANDLE(NDIS_PD_PROVIDER_HANDLE);
typedef
800
_IRQL_requires_(PASSIVE_LEVEL)
801
_IRQL_requires_same_
802
_At_(*NdisPDQueue, __drv_allocatesMem(Mem))
803
_Function_class_(NDIS_PD_ALLOCATE_QUEUE)
804
NTSTATUS
805
(NDIS_PD_ALLOCATE_QUEUE)(
806
    _In_ NDIS_PD_PROVIDER_HANDLE ProviderHandle,
807
    _In_ CONST NDIS_PD_QUEUE_PARAMETERS* QueueParameters,
808
    _Outptr_ NDIS_PD_QUEUE** NdisPDQueue
809
    );
810
811
typedef NDIS_PD_ALLOCATE_QUEUE *NDIS_PD_ALLOCATE_QUEUE_HANDLER;
812
813
//
814
// Caller is responsible for ensuring that the PD queue is empty before
815
// issuing this call. Caller is also responsible for clearing all filters
816
// that target this queue before closing the queue.
817
//
818
typedef
819
_IRQL_requires_(PASSIVE_LEVEL)
820
_IRQL_requires_same_
821
_Function_class_(NDIS_PD_FREE_QUEUE)
822
VOID
823
(NDIS_PD_FREE_QUEUE)(
824
    _In_ __drv_freesMem(Mem) NDIS_PD_QUEUE* NdisPDQueue
825
    );
826
827
typedef NDIS_PD_FREE_QUEUE *NDIS_PD_FREE_QUEUE_HANDLER;
828
829
//
830
// This function allows a PD client to obtain
831
// PD-mode access to the NDIS receive queues. While in PD-mode,
832
// PD client uses the PD-post-and-drain API on the receive
833
// queues, and the PD provider stops indicating receive NBLs via the existing
834
// NDIS receive data path.
835
// Once PD client gets the list of NDIS_PD_QUEUE objects representing the NDIS
836
// receive queues, PD client typically decides which processor core to use
837
// for draining each individual receive queue. PD client uses the
838
// NDIS_PD_QUEUE_PARAMETERS.Affinity parameter returned by the provider for
839
// this purpose. PD provider must set NDIS_PD_QUEUE_PARAMETERS.Affinity to the
840
// processor core derived from the indirection table configured via
841
// OID_GEN_RECEIVE_SCALE_PARAMETERS.
842
// Note that the TX path is completely independent irrespective of whether
843
// a PD client is using PD on the receive queues or not. I.e., Miniport
844
// adapter must handle MiniportSendNetBufferLists/MiniportReturnNetBufferLists
845
// as usual even when a PD client puts the NDIS receive queues into PD-mode.
846
// PD client creates PD transmit queues for sending packets via PD. So,
847
// while PD client is sending PD_BUFFERs over PD transmit queues, existing
848
// NDIS protocol drivers or LWFs may also send packets via usual NDIS NBL APIs
849
// on the same miniport adapter.
850
//
851
typedef
852
_IRQL_requires_(PASSIVE_LEVEL)
853
_IRQL_requires_same_
854
_Function_class_(NDIS_PD_ACQUIRE_RECEIVE_QUEUES)
855
NTSTATUS
856
(NDIS_PD_ACQUIRE_RECEIVE_QUEUES)(
857
    _In_ NDIS_PD_PROVIDER_HANDLE ProviderHandle,
858
    _In_ CONST NDIS_PD_ACQUIRE_QUEUES_PARAMETERS* Parameters,
859
    _Out_writes_to_(*QueueCount, *QueueCount) NDIS_PD_QUEUE** NdisPDQueueArray,
860
    _Inout_ ULONG* QueueCount,
861
    _Out_writes_bytes_to_(*QueueParametersArraySize, *QueueParametersArraySize)
862
        NDIS_PD_QUEUE_PARAMETERS* QueueParametersArray,
863
    _Inout_ ULONG* QueueParametersArraySize,
864
    _Out_ ULONG* QueueParametersArrayElementSize
865
    );
866
867
typedef NDIS_PD_ACQUIRE_RECEIVE_QUEUES *NDIS_PD_ACQUIRE_RECEIVE_QUEUES_HANDLER;
868
869
//
870
// This function allows a PD client to stop using PD over
871
// NDIS receive queues previosuly acquired via NDIS_PD_ACQUIRE_RECEIVE_QUEUES,
872
// and instruct the PD provider to go back to the NBL-based NDIS receive data
873
// path operation. PD client will invoke this function only after all pending
874
// PD buffers are completed and drained (i.e., the receive queue is empty).
875
// PD client does this by issuing a PDFlush request and then drain all pending
876
// PD buffers until no pending buffers are left. Note that the PD client may
877
// invoke NDIS_PD_ACQUIRE_RECEIVE_QUEUES again in the future to acquire PD-mode
878
// access to the NDIS receive queues again.
879
//
880
typedef
881
_IRQL_requires_(PASSIVE_LEVEL)
882
_IRQL_requires_same_
883
_Function_class_(NDIS_PD_RELEASE_RECEIVE_QUEUES)
884
VOID
885
(NDIS_PD_RELEASE_RECEIVE_QUEUES)(
886
    _In_ NDIS_PD_PROVIDER_HANDLE ProviderHandle
887
    );
888
889
typedef NDIS_PD_RELEASE_RECEIVE_QUEUES *NDIS_PD_RELEASE_RECEIVE_QUEUES_HANDLER;
890
891
//
892
// This function allows the PD client to allocate a counter object.
893
// Receive queue counters are used for tracking receive queue activity.
894
// Transmit queue counters are used for tracking transmit queue activity.
895
// Filter counters are used for tracking filter match activity.
896
// Same counter object (of a given type) can be associated with multiple
897
// queue or filter objects. E.g., use receive counter RC1 for receive queues
898
// RQ1, RQ2, RQ3, and receive counter RC2 for receive queues RQ4 and RQ5.
899
//
900
typedef
901
_IRQL_requires_(PASSIVE_LEVEL)
902
_IRQL_requires_same_
903
_At_(*CounterHandle, __drv_allocatesMem(Mem))
904
_Function_class_(NDIS_PD_ALLOCATE_COUNTER)
905
NTSTATUS
906
(NDIS_PD_ALLOCATE_COUNTER)(
907
    _In_ NDIS_PD_PROVIDER_HANDLE ProviderHandle,
908
    _In_ CONST NDIS_PD_COUNTER_PARAMETERS* CounterParameters,
909
    _Out_ NDIS_PD_COUNTER_HANDLE* CounterHandle
910
    );
911
912
typedef NDIS_PD_ALLOCATE_COUNTER *NDIS_PD_ALLOCATE_COUNTER_HANDLER;
913
914
//
915
// PD client frees a counter only after closing
916
// the object it's associated with, i.e., first close all the queues which
917
// counter C1 was associated with, and then free counter C1.
918
//
919
typedef
920
_IRQL_requires_(PASSIVE_LEVEL)
921
_IRQL_requires_same_
922
VOID
923
_Function_class_(NDIS_PD_FREE_COUNTER)
924
(NDIS_PD_FREE_COUNTER)(
925
    _In_ __drv_freesMem(Mem) NDIS_PD_COUNTER_HANDLE CounterHandle
926
    );
927
928
typedef NDIS_PD_FREE_COUNTER *NDIS_PD_FREE_COUNTER_HANDLER;
929
930
typedef
931
_IRQL_requires_(PASSIVE_LEVEL)
932
_IRQL_requires_same_
933
_Function_class_(NDIS_PD_QUERY_COUNTER)
934
VOID
935
(NDIS_PD_QUERY_COUNTER)(
936
    _In_ NDIS_PD_COUNTER_HANDLE CounterHandle,
937
    _Out_ NDIS_PD_COUNTER_VALUE* CounterValue
938
    );
939
940
typedef NDIS_PD_QUERY_COUNTER *NDIS_PD_QUERY_COUNTER_HANDLER;
941
942
//
943
// This function is used for directing specific flows of packets
944
// to a specific PD receive queue. PD filters are applied before any spreading
945
// takes place. Hence, packet matching a PD filter can be placed into their
946
// dedicated PD queue, and rest of the packets can be spread via RSS as usual.
947
// The rules for plumbing filters is the same as GFT flows, i.e., the PD client
948
// is responsible for plumbing non-overlapping disambiguous filters ultimately.
949
// However, some PD providers may allow overlapping filters as long as the PD
950
// client can pass a Priority value that indicates which filter must be applied
951
// first. PD provider may fail filter set requests with
952
// STATUS_NOT_SUPPORTED if the client attemtps to set filters with
953
// conflicting profiles or overlapping match conditions. NDIS_PD_CAPABILITIES
954
// structure does not allow the provider to advertise all valid combinations
955
// of profiles that the PD client can use simultaneously. Hence, some of
956
// the capabilities are discovered by the PD client at runtime when/if the PD
957
// provider fails the filter set request with STATUS_NDIS_NOT_SUPPORTED.
958
//
959
typedef
960
_IRQL_requires_(PASSIVE_LEVEL)
961
_IRQL_requires_same_
962
_At_(*FilterHandle, __drv_allocatesMem(Mem))
963
_Function_class_(NDIS_PD_SET_RECEIVE_FILTER)
964
NTSTATUS
965
(NDIS_PD_SET_RECEIVE_FILTER)(
966
    _In_ NDIS_PD_PROVIDER_HANDLE ProviderHandle,
967
    _In_ CONST NDIS_PD_FILTER_PARAMETERS* FilterParameters,
968
    _Out_ NDIS_PD_FILTER_HANDLE* FilterHandle
969
    );
970
971
typedef NDIS_PD_SET_RECEIVE_FILTER *NDIS_PD_SET_RECEIVE_FILTER_HANDLER;
972
973
//
974
// After this function returns, it's
975
// guaranteed that no more newly arriving packet will match this filter.
976
// However, there may still be in-flight packets that have already matched this
977
// filter and they are on their way to being placed into the target receive
978
// queue (but they are not placed yet).
979
//
980
typedef
981
_IRQL_requires_(PASSIVE_LEVEL)
982
_IRQL_requires_same_
983
_Function_class_(NDIS_PD_CLEAR_RECEIVE_FILTER)
984
VOID
985
(NDIS_PD_CLEAR_RECEIVE_FILTER)(
986
    _In_ __drv_freesMem(Mem) NDIS_PD_FILTER_HANDLE FilterHandle
987
    );
988
989
typedef NDIS_PD_CLEAR_RECEIVE_FILTER *NDIS_PD_CLEAR_RECEIVE_FILTER_HANDLER;
990
991
//
992
// This function is used for arming a PD queue for getting a notification
993
// upon PD_BUFFER completion.
994
//
995
typedef
996
_IRQL_requires_(PASSIVE_LEVEL)
997
_IRQL_requires_same_
998
_Function_class_(NDIS_PD_REQUEST_DRAIN_NOTIFICATION)
999
VOID
1000
(NDIS_PD_REQUEST_DRAIN_NOTIFICATION)(
1001
    _Inout_ NDIS_PD_QUEUE* NdisPDQueue
1002
    );
1003
1004
typedef NDIS_PD_REQUEST_DRAIN_NOTIFICATION *NDIS_PD_REQUEST_DRAIN_NOTIFICATION_HANDLER;
1005
1006
//
// NDIS_PD_QUEUE_CONTROL is primarily used for setting and/or querying various
// queue properties. This is a synchronous routine called at PASSIVE_LEVEL, but
// PD provider must avoid blocking/waiting within this call since this call
// can be invoked by the PD clients from their primary packet processing loop
// and any stall/wait within this function can degrade performance.
// See the individual ControlCodes and the associated in/out buffer definitions
// for specific operations.
// PD platform/clients invoke this function always in serialized fashion over
// a given NDIS_PD_QUEUE. But serialization relative to data path operations
// is NOT guaranteed. For example, client may set a NdisPDQCTLModerationInterval
// request on one thread while another thread may be issuing post&drain
// and arm requests on the queue.
// PD provider must return STATUS_NOT_SUPPORTED for a ControlType/ControlCode
// combination that is not recognized/supported by the provider.
// STATUS_PENDING is an illegal return value from this function.
// PD platform validates the buffer sizes and the ControlTypes for all
// currently defined ControlCodes before passing the request to the provider.
//

//
// Direction of the buffers passed with a PD control request.
//
typedef enum {
    //
    // INPUT to the PD provider, No OUTPUT from the PD provider
    //
    NdisPDCTL_IN,

    //
    // OUTPUT from the PD provider, no INPUT to the PD provider
    //
    NdisPDCTL_OUT,

    //
    // INPUT to the PD provider and OUTPUT from the PD provider
    //
    NdisPDCTL_INOUT,
} NDIS_PD_CONTROL_TYPE;
//
// Control codes accepted by NDIS_PD_QUEUE_CONTROL.
//
typedef enum {

    NdisPDQCTLUnknown,

    //
    // Type: NdisPDCTL_IN
    // InBuffer: ULONG (QueueDepthThreshold)
    // InBufferSize: sizeof(ULONG)
    //
    // QueueDepthThreshold can be set by PD clients on PD queues at any
    // point, but never concurrently with a PostAndDrainEx call on the same
    // PD queue. As long as the queue depth of a receive queue is below the
    // threshold value, PD provider must set *QueueDepthThresholdReached
    // to TRUE before returning from PostAndDrainEx. As long as the queue
    // depth of a transmit queue is above the threshold value, PD provider
    // must set *QueueDepthThresholdReached to TRUE before returning from
    // PostAndDrainEx. Otherwise, PD provider must set
    // *QueueDepthThresholdReached to FALSE before returning from
    // PostAndDrainEx.
    // The default value for QueueDepthThreshold is MAXULONG for a transmit
    // queue, and 0 for a receive queue. That is, PD provider simply sets
    // *QueueDepthThresholdReached to FALSE if NdisPDQCTLQueueDepthThreshold
    // has never been issued on the queue.
    //
    NdisPDQCTLQueueDepthThreshold,

    //
    // Type: NdisPDCTL_IN
    // InBuffer: ULONG (ModerationInterval)
    // InBufferSize: sizeof(ULONG)
    //
    // Used for setting a notification ModerationInterval value on a given PD
    // queue. ModerationInterval is the maximum number of nanoseconds that a
    // provider can defer interrupting the host CPU after an armed PD queue
    // goes into drainable state. If ModerationInterval is zero (which is the
    // default value for any arm-able PD queue), the provider performs no
    // interrupt moderation on the PD queue. If ModerationInterval is larger
    // than the maximum moderation interval that the PD provider supports or
    // if the PD provider's timer granularity is larger, the PD provider can
    // round down the interval value. PD provider advertises the minimum and
    // the maximum values as well as the granularity of the intermediate values
    // it can support for ModerationInterval via the NDIS_PD_CAPABILITIES
    // structure. If NDIS_PD_CAPS_NOTIFICATION_MODERATION_INTERVAL_SUPPORTED is
    // NOT advertised by the PD provider, PD client will not set any
    // ModerationInterval value.
    //
    NdisPDQCTLModerationInterval,

    //
    // Type: NdisPDCTL_IN
    // InBuffer: ULONG (ModerationCount)
    // InBufferSize: sizeof(ULONG)
    //
    // Used for setting a notification ModerationCount value on a given PD
    // queue. ModerationCount is the maximum number of drainable PD_BUFFERs
    // that a provider can accumulate in an armed PD queue before interrupting
    // the host CPU to satisfy a drain notification request. If ModerationCount
    // is zero or one, the provider performs no interrupt moderation on the PD
    // queue regardless of the value of the ModerationInterval property.
    // If ModerationCount is MAXULONG or larger than the size of the PD queue,
    // ModerationInterval alone controls the interrupt moderation on the PD queue.
    // If NDIS_PD_CAPS_NOTIFICATION_MODERATION_COUNT_SUPPORTED is NOT
    // advertised by the PD provider, PD client will not set any ModerationCount
    // value. The default value for ModerationCount is MAXULONG.
    //
    NdisPDQCTLModerationCount,

    //
    // Type: NdisPDCTL_IN
    // InBuffer: ULONG (NotificationGroupId)
    // InBufferSize: sizeof(ULONG)
    //
    // Used for setting a notification group id on an arm-able PD queue.
    // By default, each PD queue upon allocation (via NDIS_PD_ALLOCATE_QUEUE
    // or NDIS_PD_ACQUIRE_RECEIVE_QUEUES) has NO notification group id. PD
    // clients can set a notification group id on an arm-able PD queue
    // before calling NDIS_PD_REQUEST_DRAIN_NOTIFICATION for the first
    // time. Once NDIS_PD_REQUEST_DRAIN_NOTIFICATION is called, the
    // notification group id for a PD queue cannot be changed.
    // See NdisMTriggerPDDrainNotification for how PD providers can use
    // notification group information for optimizing drain notifications.
    //
    NdisPDQCTLNotificationGroupId,

    //
    // Type: NdisPDCTL_IN
    // InBuffer: NDIS_QOS_SQ_ID
    // InBufferSize: sizeof(NDIS_QOS_SQ_ID)
    //
    // Used for setting a QoS scheduler queue Id on a given PD queue.
    //
    NdisPDQCTLSchedulerQueueId,

    NdisPDQCTLMax
} NDIS_PD_QUEUE_CONTROL_CODE;
//
// The default NotificationGroupId value for a given PD queue, which means
// that the PD queue is NOT part of any notification group.
//
#define NDIS_PD_NOTIFICATION_GROUP_ID_NONE ((ULONG)0)
typedef
1145
_IRQL_requires_(PASSIVE_LEVEL)
1146
_IRQL_requires_same_
1147
_Function_class_(NDIS_PD_QUEUE_CONTROL)
1148
NTSTATUS
1149
(NDIS_PD_QUEUE_CONTROL)(
1150
    _Inout_ NDIS_PD_QUEUE* NdisPDQueue,
1151
    _In_ NDIS_PD_CONTROL_TYPE ControlType,
1152
    _In_ NDIS_PD_QUEUE_CONTROL_CODE ControlCode,
1153
    _In_reads_bytes_opt_(InBufferSize) PVOID InBuffer,
1154
    _In_ ULONG InBufferSize,
1155
    _Out_writes_bytes_to_opt_(OutBufferSize, *BytesReturned) PVOID OutBuffer,
1156
    _In_ ULONG OutBufferSize,
1157
    _Out_opt_ ULONG* BytesReturned
1158
    );
1159
1160
typedef NDIS_PD_QUEUE_CONTROL *NDIS_PD_QUEUE_CONTROL_HANDLER;
1161
1162
//
// Control codes accepted by NDIS_PD_PROVIDER_CONTROL.
//
typedef enum {

    NdisPDPCTLUnknown,

    //
    // Type: NdisPDCTL_OUT
    // OutBuffer: A variable length flat buffer which the provider stores a
    //            NDIS_PD_CAPABILITIES structure followed by a variable
    //            number of elements pointed by certain offset fields in the
    //            returned NDIS_PD_CAPABILITIES structure.
    // OutBufferSize: length of the buffer passed via OutBuffer parameter
    // *BytesReturned: Upon an NT_SUCCESS() return, this reflects the
    //                 actual number of bytes written into OutBuffer.
    //                 If OutBufferSize is not large enough to hold all the
    //                 bytes that the PD provider needs to write, then the
    //                 provider stores the actual size needed in *BytesReturned
    //                 and returns STATUS_BUFFER_TOO_SMALL.
    //
    NdisPDPCTLCapabilities,

    NdisPDPCTLMax
} NDIS_PD_PROVIDER_CONTROL_CODE;
//
1186
// PD providers must return STATUS_NOT_SUPPORTED for provider control codes
1187
// that they do not recognize or support. NdisPDPCTLCapabilities is the only
1188
// defined control code currently, and support for it is optional. 
1189
//
1190
typedef
1191
_IRQL_requires_(PASSIVE_LEVEL)
1192
_IRQL_requires_same_
1193
_Function_class_(NDIS_PD_PROVIDER_CONTROL)
1194
NTSTATUS
1195
(NDIS_PD_PROVIDER_CONTROL)(
1196
    _In_ NDIS_PD_PROVIDER_HANDLE ProviderHandle,
1197
    _In_ NDIS_PD_CONTROL_TYPE ControlType,
1198
    _In_ NDIS_PD_PROVIDER_CONTROL_CODE ControlCode,
1199
    _In_reads_bytes_opt_(InBufferSize) PVOID InBuffer,
1200
    _In_ ULONG InBufferSize,
1201
    _Out_writes_bytes_to_opt_(OutBufferSize, *BytesReturned) PVOID OutBuffer,
1202
    _In_ ULONG OutBufferSize,
1203
    _Out_opt_ ULONG* BytesReturned
1204
    );
1205
1206
typedef NDIS_PD_PROVIDER_CONTROL *NDIS_PD_PROVIDER_CONTROL_HANDLER;
1207
1208
typedef struct _NDIS_PD_PROVIDER_DISPATCH {
1209
    //
1210
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
1211
    // Header.Revision = NDIS_PD_PROVIDER_DISPATCH_REVISION_1;
1212
    // Header.Size >= NDIS_SIZEOF_PD_PROVIDER_DISPATCH_REVISION_1;
1213
    //
1214
    NDIS_OBJECT_HEADER Header;
1215
    ULONG Flags; // Reserved. Set to 0 by PD provider and ignored by PD client.
1216
    NDIS_PD_ALLOCATE_QUEUE_HANDLER NdisPDAllocateQueue;
1217
    NDIS_PD_FREE_QUEUE_HANDLER NdisPDFreeQueue;
1218
    NDIS_PD_ACQUIRE_RECEIVE_QUEUES_HANDLER NdisPDAcquireReceiveQueues;
1219
    NDIS_PD_RELEASE_RECEIVE_QUEUES_HANDLER NdisPDReleaseReceiveQueues;
1220
    NDIS_PD_ALLOCATE_COUNTER_HANDLER NdisPDAllocateCounter;
1221
    NDIS_PD_FREE_COUNTER_HANDLER NdisPDFreeCounter;
1222
    NDIS_PD_QUERY_COUNTER_HANDLER NdisPDQueryCounter;
1223
    NDIS_PD_SET_RECEIVE_FILTER_HANDLER NdisPDSetReceiveFilter;
1224
    NDIS_PD_CLEAR_RECEIVE_FILTER_HANDLER NdisPDClearReceiveFilter;
1225
    NDIS_PD_REQUEST_DRAIN_NOTIFICATION_HANDLER NdisPDRequestDrainNotification;
1226
    NDIS_PD_QUEUE_CONTROL_HANDLER NdisPDQueueControl;
1227
    NDIS_PD_PROVIDER_CONTROL_HANDLER NdisPDProviderControl;
1228
} NDIS_PD_PROVIDER_DISPATCH;
1229
1230
#define NDIS_PD_PROVIDER_DISPATCH_REVISION_1 1
1231
#define NDIS_SIZEOF_PD_PROVIDER_DISPATCH_REVISION_1 \
1232
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_PROVIDER_DISPATCH, NdisPDProviderControl)
1233
1234
1235
//
// OID_PD_OPEN_PROVIDER
//
typedef struct DECLSPEC_ALIGN(8) _NDIS_PD_OPEN_PROVIDER_PARAMETERS {
    //
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
    // Header.Revision = NDIS_PD_OPEN_PROVIDER_PARAMETERS_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_OPEN_PROVIDER_PARAMETERS_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags;
    _Out_ NDIS_PD_PROVIDER_HANDLE ProviderHandle;
    _Out_ CONST NDIS_PD_PROVIDER_DISPATCH* ProviderDispatch;
} NDIS_PD_OPEN_PROVIDER_PARAMETERS;

#define NDIS_PD_OPEN_PROVIDER_PARAMETERS_REVISION_1 1
#define NDIS_SIZEOF_PD_OPEN_PROVIDER_PARAMETERS_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_OPEN_PROVIDER_PARAMETERS, ProviderDispatch)
//
// OID_PD_CLOSE_PROVIDER. An NDIS protocol or filter driver must call this
// OID whenever it receives an unbind/detach notification, a pause indication,
// a low-power event, or a PD config change event that indicates PD is disabled
// on the binding/attachment. Before calling this OID, NDIS protocol/filter
// driver must ensure that it closed/freed all PD objects such as queues,
// counters, filters that it had created over the PD provider instance that is
// being closed. NDIS protocol/filter driver must guarantee that there are no
// in-progress calls to any of the PD provider dispatch table functions before
// issuing this OID.
//
typedef struct DECLSPEC_ALIGN(8) _NDIS_PD_CLOSE_PROVIDER_PARAMETERS {
    //
    // Header.Type = NDIS_OBJECT_TYPE_DEFAULT;
    // Header.Revision = NDIS_PD_CLOSE_PROVIDER_PARAMETERS_REVISION_1;
    // Header.Size >= NDIS_SIZEOF_PD_CLOSE_PROVIDER_PARAMETERS_REVISION_1;
    //
    NDIS_OBJECT_HEADER Header;
    ULONG Flags; // Reserved. Must be set to 0 by client, ignored by provider
    NDIS_PD_PROVIDER_HANDLE ProviderHandle;
} NDIS_PD_CLOSE_PROVIDER_PARAMETERS;

#define NDIS_PD_CLOSE_PROVIDER_PARAMETERS_REVISION_1 1
#define NDIS_SIZEOF_PD_CLOSE_PROVIDER_PARAMETERS_REVISION_1 \
        RTL_SIZEOF_THROUGH_FIELD(NDIS_PD_CLOSE_PROVIDER_PARAMETERS, ProviderHandle)
#endif // (NDIS_SUPPORT_NDIS650)