/* -*- Mode: C; indent-tabs-mode:t ; c-basic-offset:8 -*- */
/*
* I/O functions for libusb
* Copyright © 2007-2009 Daniel Drake <[email protected]>
* Copyright © 2001 Johannes Erdfelt <[email protected]>
* Copyright © 2019-2022 Nathan Hjelm <[email protected]>
* Copyright © 2019-2022 Google LLC. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libusbi.h"
/**
* \page libusb_io Synchronous and asynchronous device I/O
*
* \section io_intro Introduction
*
 * If you're using libusb in your application, you probably want to
 * perform I/O with devices - that is, USB data transfers.
*
* libusb offers two separate interfaces for device I/O. This page aims to
* introduce the two in order to help you decide which one is more suitable
* for your application. You can also choose to use both interfaces in your
* application by considering each transfer on a case-by-case basis.
*
* Once you have read through the following discussion, you should consult the
* detailed API documentation pages for the details:
* - \ref libusb_syncio
* - \ref libusb_asyncio
*
* \section theory Transfers at a logical level
*
* At a logical level, USB transfers typically happen in two parts. For
 * example, when reading data from an endpoint:
* -# A request for data is sent to the device
* -# Some time later, the incoming data is received by the host
*
* or when writing data to an endpoint:
*
* -# The data is sent to the device
* -# Some time later, the host receives acknowledgement from the device that
* the data has been transferred.
*
* There may be an indefinite delay between the two steps. Consider a
* fictional USB input device with a button that the user can press. In order
* to determine when the button is pressed, you would likely submit a request
* to read data on a bulk or interrupt endpoint and wait for data to arrive.
* Data will arrive when the button is pressed by the user, which is
* potentially hours later.
*
* libusb offers both a synchronous and an asynchronous interface to performing
* USB transfers. The main difference is that the synchronous interface
* combines both steps indicated above into a single function call, whereas
* the asynchronous interface separates them.
*
* \section sync The synchronous interface
*
* The synchronous I/O interface allows you to perform a USB transfer with
* a single function call. When the function call returns, the transfer has
* completed and you can parse the results.
*
* If you have used libusb-0.1 before, this I/O style will seem familiar to
* you. libusb-0.1 only offered a synchronous interface.
*
* In our input device example, to read button presses you might write code
* in the following style:
\code
unsigned char data[4];
int actual_length;
int r = libusb_bulk_transfer(dev_handle, LIBUSB_ENDPOINT_IN, data, sizeof(data), &actual_length, 0);
if (r == 0 && actual_length == sizeof(data)) {
// results of the transaction can now be found in the data buffer
// parse them here and report button press
} else {
error();
}
\endcode
*
* The main advantage of this model is simplicity: you did everything with
* a single simple function call.
*
* However, this interface has its limitations. Your application will sleep
* inside libusb_bulk_transfer() until the transaction has completed. If it
* takes the user 3 hours to press the button, your application will be
* sleeping for that long. Execution will be tied up inside the library -
* the entire thread will be useless for that duration.
*
* Another issue is that by tying up the thread with that single transaction
* there is no possibility of performing I/O with multiple endpoints and/or
* multiple devices simultaneously, unless you resort to creating one thread
* per transaction.
*
* Additionally, there is no opportunity to cancel the transfer after the
* request has been submitted.
*
* For details on how to use the synchronous API, see the
* \ref libusb_syncio "synchronous I/O API documentation" pages.
*
* \section async The asynchronous interface
*
* Asynchronous I/O is the most significant new feature in libusb-1.0.
* Although it is a more complex interface, it solves all the issues detailed
* above.
*
 * Instead of providing functions that block until the I/O has completed,
* libusb's asynchronous interface presents non-blocking functions which
* begin a transfer and then return immediately. Your application passes a
* callback function pointer to this non-blocking function, which libusb will
* call with the results of the transaction when it has completed.
*
* Transfers which have been submitted through the non-blocking functions
* can be cancelled with a separate function call.
*
* The non-blocking nature of this interface allows you to be simultaneously
* performing I/O to multiple endpoints on multiple devices, without having
* to use threads.
*
* This added flexibility does come with some complications though:
* - In the interest of being a lightweight library, libusb does not create
* threads and can only operate when your application is calling into it. Your
 * application must call into libusb from its main loop when events are ready
* to be handled, or you must use some other scheme to allow libusb to
* undertake whatever work needs to be done.
* - libusb also needs to be called into at certain fixed points in time in
* order to accurately handle transfer timeouts.
* - Memory handling becomes more complex. You cannot use stack memory unless
* the function with that stack is guaranteed not to return until the transfer
* callback has finished executing.
* - You generally lose some linearity from your code flow because submitting
* the transfer request is done in a separate function from where the transfer
* results are handled. This becomes particularly obvious when you want to
* submit a second transfer based on the results of an earlier transfer.
*
* Internally, libusb's synchronous interface is expressed in terms of function
* calls to the asynchronous interface.
*
* For details on how to use the asynchronous API, see the
* \ref libusb_asyncio "asynchronous I/O API" documentation pages.
*/
/**
* \page libusb_packetoverflow Packets and overflows
*
* \section packets Packet abstraction
*
* The USB specifications describe how data is transmitted in packets, with
* constraints on packet size defined by endpoint descriptors. The host must
* not send data payloads larger than the endpoint's maximum packet size.
*
* libusb and the underlying OS abstract out the packet concept, allowing you
* to request transfers of any size. Internally, the request will be divided
* up into correctly-sized packets. You do not have to be concerned with
* packet sizes, but there is one exception when considering overflows.
*
* \section overflow Bulk/interrupt transfer overflows
*
* When requesting data on a bulk endpoint, libusb requires you to supply a
* buffer and the maximum number of bytes of data that libusb can put in that
* buffer. However, the size of the buffer is not communicated to the device -
* the device is just asked to send any amount of data.
*
* There is no problem if the device sends an amount of data that is less than
* or equal to the buffer size. libusb reports this condition to you through
* the \ref libusb_transfer::actual_length "libusb_transfer.actual_length"
* field.
*
* Problems may occur if the device attempts to send more data than can fit in
* the buffer. libusb reports LIBUSB_TRANSFER_OVERFLOW for this condition but
* other behaviour is largely undefined: actual_length may or may not be
* accurate, the chunk of data that can fit in the buffer (before overflow)
* may or may not have been transferred.
*
* Overflows are nasty, but can be avoided. Even though you were told to
* ignore packets above, think about the lower level details: each transfer is
* split into packets (typically small, with a maximum size of 512 bytes).
* Overflows can only happen if the final packet in an incoming data transfer
* is smaller than the actual packet that the device wants to transfer.
* Therefore, you will never see an overflow if your transfer buffer size is a
* multiple of the endpoint's packet size: the final packet will either
* fill up completely or will be only partially filled.
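 *
 * As a minimal sketch (the endpoint address 0x81, the target size of 4096
 * bytes and the open handle dev_handle are illustrative assumptions), you
 * could round the buffer size up to a whole number of packets like this:
\code
int mps = libusb_get_max_packet_size(libusb_get_device(dev_handle), 0x81);
if (mps > 0) {
	int len = ((4096 + mps - 1) / mps) * mps; // whole multiple of the packet size
	unsigned char *buf = malloc(len);
	// request 'len' bytes into 'buf' - an overflow can no longer occur
}
\endcode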
*/
/**
* @defgroup libusb_asyncio Asynchronous device I/O
*
* This page details libusb's asynchronous (non-blocking) API for USB device
* I/O. This interface is very powerful but is also quite complex - you will
* need to read this page carefully to understand the necessary considerations
* and issues surrounding use of this interface. Simplistic applications
* may wish to consider the \ref libusb_syncio "synchronous I/O API" instead.
*
* The asynchronous interface is built around the idea of separating transfer
* submission and handling of transfer completion (the synchronous model
* combines both of these into one). There may be a long delay between
* submission and completion, however the asynchronous submission function
* is non-blocking so will return control to your application during that
* potentially long delay.
*
* \section asyncabstraction Transfer abstraction
*
* For the asynchronous I/O, libusb implements the concept of a generic
* transfer entity for all types of I/O (control, bulk, interrupt,
* isochronous). The generic transfer object must be treated slightly
* differently depending on which type of I/O you are performing with it.
*
* This is represented by the public libusb_transfer structure type.
*
* \section asynctrf Asynchronous transfers
*
* We can view asynchronous I/O as a 5 step process:
* -# <b>Allocation</b>: allocate a libusb_transfer
* -# <b>Filling</b>: populate the libusb_transfer instance with information
* about the transfer you wish to perform
* -# <b>Submission</b>: ask libusb to submit the transfer
* -# <b>Completion handling</b>: examine transfer results in the
* libusb_transfer structure
* -# <b>Deallocation</b>: clean up resources
*
*
* \subsection asyncalloc Allocation
*
* This step involves allocating memory for a USB transfer. This is the
* generic transfer object mentioned above. At this stage, the transfer
* is "blank" with no details about what type of I/O it will be used for.
*
* Allocation is done with the libusb_alloc_transfer() function. You must use
* this function rather than allocating your own transfers.
*
* \subsection asyncfill Filling
*
* This step is where you take a previously allocated transfer and fill it
* with information to determine the message type and direction, data buffer,
* callback function, etc.
*
* You can either fill the required fields yourself or you can use the
* helper functions: libusb_fill_control_transfer(), libusb_fill_bulk_transfer()
* and libusb_fill_interrupt_transfer().
*
* \subsection asyncsubmit Submission
*
* When you have allocated a transfer and filled it, you can submit it using
* libusb_submit_transfer(). This function returns immediately but can be
* regarded as firing off the I/O request in the background.
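 *
 * Putting the first three steps together, a minimal sketch for an interrupt
 * IN endpoint might look like this (dev_handle, the endpoint address 0x81 and
 * the callback cb are assumptions for illustration):
\code
static unsigned char irq_buf[8];

struct libusb_transfer *xfr = libusb_alloc_transfer(0);
if (!xfr)
	error();
libusb_fill_interrupt_transfer(xfr, dev_handle, LIBUSB_ENDPOINT_IN | 1,
	irq_buf, sizeof(irq_buf), cb, NULL, 0); // timeout of 0 means no timeout
if (libusb_submit_transfer(xfr) != 0) {
	// the transfer was never submitted, so the callback will not run
	libusb_free_transfer(xfr);
}
\endcode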
*
* \subsection asynccomplete Completion handling
*
* After a transfer has been submitted, one of four things can happen to it:
*
* - The transfer completes (i.e. some data was transferred)
* - The transfer has a timeout and the timeout expires before all data is
* transferred
* - The transfer fails due to an error
* - The transfer is cancelled
*
* Each of these will cause the user-specified transfer callback function to
* be invoked. It is up to the callback function to determine which of the
* above actually happened and to act accordingly.
*
* The user-specified callback is passed a pointer to the libusb_transfer
* structure which was used to setup and submit the transfer. At completion
* time, libusb has populated this structure with results of the transfer:
* success or failure reason, number of bytes of data transferred, etc. See
* the libusb_transfer structure documentation for more information.
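 *
 * As an illustrative sketch (not a prescribed pattern), a callback that
 * distinguishes these outcomes might look like this:
\code
static void cb(struct libusb_transfer *xfr)
{
	switch (xfr->status) {
	case LIBUSB_TRANSFER_COMPLETED:
		// xfr->actual_length bytes of data are available in xfr->buffer
		break;
	case LIBUSB_TRANSFER_TIMED_OUT:
	case LIBUSB_TRANSFER_CANCELLED:
		// some data may still have been transferred - check actual_length
		break;
	default:
		// LIBUSB_TRANSFER_ERROR, _STALL, _NO_DEVICE or _OVERFLOW
		break;
	}
}
\endcode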
*
* <b>Important Note</b>: The user-specified callback is called from an event
* handling context. It is therefore important that no calls are made into
* libusb that will attempt to perform any event handling. Examples of such
* functions are any listed in the \ref libusb_syncio "synchronous API" and any of
* the blocking functions that retrieve \ref libusb_desc "USB descriptors".
*
* \subsection Deallocation
*
* When a transfer has completed (i.e. the callback function has been invoked),
* you are advised to free the transfer (unless you wish to resubmit it, see
* below). Transfers are deallocated with libusb_free_transfer().
*
* It is undefined behaviour to free a transfer which has not completed.
*
* \section asyncresubmit Resubmission
*
* You may be wondering why allocation, filling, and submission are all
* separated above where they could reasonably be combined into a single
* operation.
*
* The reason for separation is to allow you to resubmit transfers without
* having to allocate new ones every time. This is especially useful for
* common situations dealing with interrupt endpoints - you allocate one
* transfer, fill and submit it, and when it returns with results you just
* resubmit it for the next interrupt.
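 *
 * A short sketch of this pattern, resubmitting from within the callback
 * itself (processing of the data is elided):
\code
static void irq_cb(struct libusb_transfer *xfr)
{
	if (xfr->status == LIBUSB_TRANSFER_COMPLETED) {
		// process xfr->buffer / xfr->actual_length here
	}
	// re-queue the same transfer for the next interrupt report
	if (libusb_submit_transfer(xfr) != 0)
		libusb_free_transfer(xfr); // resubmission failed; clean up
}
\endcode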
*
* \section asynccancel Cancellation
*
* Another advantage of using the asynchronous interface is that you have
* the ability to cancel transfers which have not yet completed. This is
* done by calling the libusb_cancel_transfer() function.
*
* libusb_cancel_transfer() is asynchronous/non-blocking in itself. When the
* cancellation actually completes, the transfer's callback function will
* be invoked, and the callback function should check the transfer status to
* determine that it was cancelled.
*
* On macOS and iOS it is not possible to cancel a single transfer. In this
* case cancelling one transfer on an endpoint will cause all transfers on
* that endpoint to be cancelled.
*
* Freeing the transfer after it has been cancelled but before cancellation
* has completed will result in undefined behaviour.
*
* \attention
* When a transfer is cancelled, some of the data may have been transferred.
* libusb will communicate this to you in the transfer callback.
* <b>Do not assume that no data was transferred.</b>
*
* \section asyncpartial Partial data transfer resulting from cancellation
*
* As noted above, some of the data may have been transferred at the time a
* transfer is cancelled. It is helpful to see how this is possible if you
* consider a bulk transfer to an endpoint with a packet size of 64 bytes.
* Supposing you submit a 512-byte transfer to this endpoint, the operating
* system will divide this transfer up into 8 separate 64-byte frames that the
* host controller will schedule for the device to transfer data. If this
* transfer is cancelled while the device is transferring data, a subset of
* these frames may be descheduled from the host controller before the device
* has the opportunity to finish transferring data to the host.
*
* What your application should do with a partial data transfer is a policy
* decision; there is no single answer that satisfies the needs of every
* application. The data that was successfully transferred should be
* considered entirely valid, but your application must decide what to do with
* the remaining data that was not transferred. Some possible actions to take
* are:
* - Resubmit another transfer for the remaining data, possibly with a shorter
* timeout
* - Discard the partially transferred data and report an error
*
* \section asynctimeout Timeouts
*
* When a transfer times out, libusb internally notes this and attempts to
* cancel the transfer. As noted in \ref asyncpartial "above", it is possible
* that some of the data may actually have been transferred. Your application
* should <b>always</b> check how much data was actually transferred once the
* transfer completes and act accordingly.
*
* \section bulk_overflows Overflows on device-to-host bulk/interrupt endpoints
*
* If your device does not have predictable transfer sizes (or it misbehaves),
* your application may submit a request for data on an IN endpoint which is
* smaller than the data that the device wishes to send. In some circumstances
* this will cause an overflow, which is a nasty condition to deal with. See
* the \ref libusb_packetoverflow page for discussion.
*
* \section asyncctrl Considerations for control transfers
*
* The <tt>libusb_transfer</tt> structure is generic and hence does not
* include specific fields for the control-specific setup packet structure.
*
* In order to perform a control transfer, you must place the 8-byte setup
* packet at the start of the data buffer. To simplify this, you could
* cast the buffer pointer to type struct libusb_control_setup, or you can
* use the helper function libusb_fill_control_setup().
*
* The wLength field placed in the setup packet must be the length you would
* expect to be sent in the setup packet: the length of the payload that
* follows (or the expected maximum number of bytes to receive). However,
* the length field of the libusb_transfer object must be the length of
* the data buffer - i.e. it should be wLength <em>plus</em> the size of
* the setup packet (LIBUSB_CONTROL_SETUP_SIZE).
*
* If you use the helper functions, this is simplified for you:
* -# Allocate a buffer of size LIBUSB_CONTROL_SETUP_SIZE plus the size of the
* data you are sending/requesting.
* -# Call libusb_fill_control_setup() on the data buffer, using the transfer
* request size as the wLength value (i.e. do not include the extra space you
* allocated for the control setup).
* -# If this is a host-to-device transfer, place the data to be transferred
* in the data buffer, starting at offset LIBUSB_CONTROL_SETUP_SIZE.
* -# Call libusb_fill_control_transfer() to associate the data buffer with
* the transfer (and to set the remaining details such as callback and timeout).
* - Note that there is no parameter to set the length field of the transfer.
* The length is automatically inferred from the wLength field of the setup
* packet.
* -# Submit the transfer.
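 *
 * Put together, a sketch of those steps for a device-to-host vendor request
 * with a wLength of 4 might look like this (dev_handle, the bRequest value
 * 0x01 and the callback cb are illustrative assumptions):
\code
unsigned char *buf = malloc(LIBUSB_CONTROL_SETUP_SIZE + 4);

libusb_fill_control_setup(buf,
	LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_RECIPIENT_DEVICE | LIBUSB_ENDPOINT_IN,
	0x01 /* bRequest */, 0 /* wValue */, 0 /* wIndex */, 4 /* wLength */);

struct libusb_transfer *xfr = libusb_alloc_transfer(0);
libusb_fill_control_transfer(xfr, dev_handle, buf, cb, NULL, 1000);
xfr->flags = LIBUSB_TRANSFER_FREE_BUFFER; // free buf when the transfer is freed
libusb_submit_transfer(xfr);
\endcode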
*
* The multi-byte control setup fields (wValue, wIndex and wLength) must
* be given in little-endian byte order (the endianness of the USB bus).
* Endianness conversion is transparently handled by
* libusb_fill_control_setup() which is documented to accept host-endian
* values.
*
* Further considerations are needed when handling transfer completion in
* your callback function:
* - As you might expect, the setup packet will still be sitting at the start
* of the data buffer.
* - If this was a device-to-host transfer, the received data will be sitting
* at offset LIBUSB_CONTROL_SETUP_SIZE into the buffer.
* - The actual_length field of the transfer structure is relative to the
* wLength of the setup packet, rather than the size of the data buffer. So,
 * if your wLength was 4 and your transfer's <tt>length</tt> was 12, then you
 * should expect an <tt>actual_length</tt> of 4 to indicate that the data was
 * transferred in its entirety.
*
* To simplify parsing of setup packets and obtaining the data from the
* correct offset, you may wish to use the libusb_control_transfer_get_data()
* and libusb_control_transfer_get_setup() functions within your transfer
* callback.
*
* Even though control endpoints do not halt, a completed control transfer
* may have a LIBUSB_TRANSFER_STALL status code. This indicates the control
* request was not supported.
*
* \section asyncintr Considerations for interrupt transfers
*
* All interrupt transfers are performed using the polling interval presented
* by the bInterval value of the endpoint descriptor.
*
* \section asynciso Considerations for isochronous transfers
*
* Isochronous transfers are more complicated than transfers to
* non-isochronous endpoints.
*
* To perform I/O to an isochronous endpoint, allocate the transfer by calling
* libusb_alloc_transfer() with an appropriate number of isochronous packets.
*
* During filling, set \ref libusb_transfer::type "type" to
* \ref libusb_transfer_type::LIBUSB_TRANSFER_TYPE_ISOCHRONOUS
* "LIBUSB_TRANSFER_TYPE_ISOCHRONOUS", and set
* \ref libusb_transfer::num_iso_packets "num_iso_packets" to a value less than
* or equal to the number of packets you requested during allocation.
* libusb_alloc_transfer() does not set either of these fields for you, given
* that you might not even use the transfer on an isochronous endpoint.
*
* Next, populate the length field for the first num_iso_packets entries in
* the \ref libusb_transfer::iso_packet_desc "iso_packet_desc" array. Section
 * 5.6.3 of the USB2 specifications describes how the maximum isochronous
* packet length is determined by the wMaxPacketSize field in the endpoint
* descriptor.
* Two functions can help you here:
*
* - libusb_get_max_iso_packet_size() is an easy way to determine the max
* packet size for an isochronous endpoint. Note that the maximum packet
* size is actually the maximum number of bytes that can be transmitted in
* a single microframe, therefore this function multiplies the maximum number
* of bytes per transaction by the number of transaction opportunities per
* microframe.
* - libusb_set_iso_packet_lengths() assigns the same length to all packets
* within a transfer, which is usually what you want.
*
* For outgoing transfers, you'll obviously fill the buffer and populate the
* packet descriptors in hope that all the data gets transferred. For incoming
* transfers, you must ensure the buffer has sufficient capacity for
* the situation where all packets transfer the full amount of requested data.
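 *
 * As a sketch, assuming dev_handle refers to an open device with an
 * isochronous IN endpoint 0x82 and iso_cb is your callback, the filling steps
 * above could be expressed as:
\code
int num_packets = 8;
int pkt_len = libusb_get_max_iso_packet_size(libusb_get_device(dev_handle), 0x82);
unsigned char *buf = malloc(num_packets * pkt_len); // check pkt_len > 0 in real code

struct libusb_transfer *xfr = libusb_alloc_transfer(num_packets);
libusb_fill_iso_transfer(xfr, dev_handle, 0x82, buf, num_packets * pkt_len,
	num_packets, iso_cb, NULL, 0);
libusb_set_iso_packet_lengths(xfr, pkt_len); // same length for every packet
libusb_submit_transfer(xfr);
\endcode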
*
* Completion handling requires some extra consideration. The
* \ref libusb_transfer::actual_length "actual_length" field of the transfer
* is meaningless and should not be examined; instead you must refer to the
* \ref libusb_iso_packet_descriptor::actual_length "actual_length" field of
* each individual packet.
*
* The \ref libusb_transfer::status "status" field of the transfer is also a
* little misleading:
* - If the packets were submitted and the isochronous data microframes
* completed normally, status will have value
* \ref libusb_transfer_status::LIBUSB_TRANSFER_COMPLETED
* "LIBUSB_TRANSFER_COMPLETED". Note that bus errors and software-incurred
* delays are not counted as transfer errors; the transfer.status field may
* indicate COMPLETED even if some or all of the packets failed. Refer to
* the \ref libusb_iso_packet_descriptor::status "status" field of each
* individual packet to determine packet failures.
* - The status field will have value
* \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR
* "LIBUSB_TRANSFER_ERROR" only when serious errors were encountered.
* - Other transfer status codes occur with normal behaviour.
*
* The data for each packet will be found at an offset into the buffer that
* can be calculated as if each prior packet completed in full. The
* libusb_get_iso_packet_buffer() and libusb_get_iso_packet_buffer_simple()
* functions may help you here.
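 *
 * For example, a completion callback might walk the per-packet results like
 * this:
\code
static void iso_cb(struct libusb_transfer *xfr)
{
	for (int i = 0; i < xfr->num_iso_packets; i++) {
		struct libusb_iso_packet_descriptor *pkt = &xfr->iso_packet_desc[i];
		if (pkt->status != LIBUSB_TRANSFER_COMPLETED)
			continue; // this packet failed; inspect pkt->status
		unsigned char *data = libusb_get_iso_packet_buffer_simple(xfr, i);
		// pkt->actual_length bytes at 'data' are valid
	}
}
\endcode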
*
* \section asynclimits Transfer length limitations
*
* Some operating systems may impose limits on the length of the transfer data
* buffer or, in the case of isochronous transfers, the length of individual
* isochronous packets. Such limits can be difficult for libusb to detect, so
* in most cases the library will simply try and submit the transfer as set up
* by you. If the transfer fails to submit because it is too large,
* libusb_submit_transfer() will return
* \ref libusb_error::LIBUSB_ERROR_INVALID_PARAM "LIBUSB_ERROR_INVALID_PARAM".
*
* The following are known limits for control transfer lengths. Note that this
* length includes the 8-byte setup packet.
* - Linux (4,096 bytes)
* - Windows (4,096 bytes)
*
* \section asyncmem Memory caveats
*
* In most circumstances, it is not safe to use stack memory for transfer
* buffers. This is because the function that fired off the asynchronous
* transfer may return before libusb has finished using the buffer, and when
 * the function returns its stack is destroyed. This is true for both
* host-to-device and device-to-host transfers.
*
* The only case in which it is safe to use stack memory is where you can
* guarantee that the function owning the stack space for the buffer does not
* return until after the transfer's callback function has completed. In every
* other case, you need to use heap memory instead.
*
* \section asyncflags Fine control
*
* Through using this asynchronous interface, you may find yourself repeating
* a few simple operations many times. You can apply a bitwise OR of certain
* flags to a transfer to simplify certain things:
* - \ref libusb_transfer_flags::LIBUSB_TRANSFER_SHORT_NOT_OK
* "LIBUSB_TRANSFER_SHORT_NOT_OK" results in transfers which transferred
* less than the requested amount of data being marked with status
* \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR "LIBUSB_TRANSFER_ERROR"
* (they would normally be regarded as COMPLETED)
* - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER
* "LIBUSB_TRANSFER_FREE_BUFFER" allows you to ask libusb to free the transfer
* buffer when freeing the transfer.
* - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_TRANSFER
* "LIBUSB_TRANSFER_FREE_TRANSFER" causes libusb to automatically free the
* transfer after the transfer callback returns.
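 *
 * For example, to have libusb release both a heap-allocated buffer and the
 * transfer itself once the callback has returned:
\code
xfr->flags = LIBUSB_TRANSFER_FREE_BUFFER | LIBUSB_TRANSFER_FREE_TRANSFER;
\endcode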
*
* \section asyncevent Event handling
*
* An asynchronous model requires that libusb perform work at various
* points in time - namely processing the results of previously-submitted
* transfers and invoking the user-supplied callback function.
*
* This gives rise to the libusb_handle_events() function which your
 * application must call into when libusb has work to do. This gives libusb
* the opportunity to reap pending transfers, invoke callbacks, etc.
*
* \note
* All event handling is performed by whichever thread calls the
* libusb_handle_events() function. libusb does not invoke any callbacks
* outside of this context. Consequently, any callbacks will be run on the
* thread that calls the libusb_handle_events() function.
*
* When to call the libusb_handle_events() function depends on which model
 * your application decides to use. The two different approaches are:
*
* -# Repeatedly call libusb_handle_events() in blocking mode from a dedicated
* thread.
* -# Integrate libusb with your application's main event loop. libusb
* exposes a set of file descriptors which allow you to do this.
*
 * The first approach has the big advantage that it will also work on Windows,
 * where libusb's poll API for select()/poll() integration is not available. So
 * if you want to support Windows and use the async API, you must use this
 * approach; see the \ref eventthread "Using an event handling thread" section
* below for details.
*
* If you prefer a single threaded approach with a single central event loop,
* see the \ref libusb_poll "polling and timing" section for how to integrate libusb
* into your application's main event loop.
*
* \section eventthread Using an event handling thread
*
 * Let's begin by stating the obvious: if you're going to use a separate
* thread for libusb event handling, your callback functions MUST be
* thread-safe.
*
 * Other than that, doing event handling from a separate thread is mostly
 * simple. You can use an event thread function as follows:
\code
void *event_thread_func(void *ctx)
{
while (event_thread_run)
libusb_handle_events(ctx);
return NULL;
}
\endcode
*
* There is one caveat though, stopping this thread requires setting the
* event_thread_run variable to 0, and after that libusb_handle_events() needs
* to return control to event_thread_func. But unless some event happens,
* libusb_handle_events() will not return.
*
 * There are two different ways of dealing with this, depending on whether your
 * application uses libusb's \ref libusb_hotplug "hotplug" support or not.
*
* Applications which do not use hotplug support, should not start the event
* thread until after their first call to libusb_open(), and should stop the
* thread when closing the last open device as follows:
\code
void my_close_handle(libusb_device_handle *dev_handle)
{
if (open_devs == 1)
event_thread_run = 0;
libusb_close(dev_handle); // This wakes up libusb_handle_events()
if (open_devs == 1)
pthread_join(event_thread);
open_devs--;
}
\endcode
*
* Applications using hotplug support should start the thread at program init,
* after having successfully called libusb_hotplug_register_callback(), and
* should stop the thread at program exit as follows:
\code
void my_libusb_exit(void)
{
event_thread_run = 0;
libusb_hotplug_deregister_callback(ctx, hotplug_cb_handle); // This wakes up libusb_handle_events()
pthread_join(event_thread);
libusb_exit(ctx);
}
\endcode
*/
/**
* @defgroup libusb_poll Polling and timing
*
* This page documents libusb's functions for polling events and timing.
* These functions are only necessary for users of the
* \ref libusb_asyncio "asynchronous API". If you are only using the simpler
* \ref libusb_syncio "synchronous API" then you do not need to ever call these
* functions.
*
* The justification for the functionality described here has already been
* discussed in the \ref asyncevent "event handling" section of the
* asynchronous API documentation. In summary, libusb does not create internal
* threads for event processing and hence relies on your application calling
* into libusb at certain points in time so that pending events can be handled.
*
* Your main loop is probably already calling poll() or select() or a
* variant on a set of file descriptors for other event sources (e.g. keyboard
* button presses, mouse movements, network sockets, etc). You then add
* libusb's file descriptors to your poll()/select() calls, and when activity
* is detected on such descriptors you know it is time to call
* libusb_handle_events().
*
* There is one final event handling complication. libusb supports
* asynchronous transfers which time out after a specified time period.
*
* On some platforms a timerfd is used, so the timeout handling is just another
* fd, on other platforms this requires that libusb is called into at or after
* the timeout to handle it. So, in addition to considering libusb's file
* descriptors in your main event loop, you must also consider that libusb
* sometimes needs to be called into at fixed points in time even when there
* is no file descriptor activity, see \ref polltime details.
*
* In order to know precisely when libusb needs to be called into, libusb
* offers you a set of pollable file descriptors and information about when
* the next timeout expires.
*
* If you are using the asynchronous I/O API, you must take one of the two
* following options, otherwise your I/O will not complete.
*
* \section pollsimple The simple option
*
* If your application revolves solely around libusb and does not need to
* handle other event sources, you can have a program structure as follows:
\code
// initialize libusb
// find and open device
// maybe fire off some initial async I/O
while (user_has_not_requested_exit)
libusb_handle_events(ctx);
// clean up and exit
\endcode
*
* With such a simple main loop, you do not have to worry about managing
* sets of file descriptors or handling timeouts. libusb_handle_events() will
* handle those details internally.
*
* \section libusb_pollmain The more advanced option
*
* \note This functionality is currently only available on Unix-like platforms.
* On Windows, libusb_get_pollfds() simply returns NULL. Applications which
* want to support Windows are advised to use an \ref eventthread
* "event handling thread" instead.
*
* In more advanced applications, you will already have a main loop which
* is monitoring other event sources: network sockets, X11 events, mouse
* movements, etc. Through exposing a set of file descriptors, libusb is
* designed to cleanly integrate into such main loops.
*
* In addition to polling file descriptors for the other event sources, you
* take a set of file descriptors from libusb and monitor those too. When you
* detect activity on libusb's file descriptors, you call
* libusb_handle_events_timeout() in non-blocking mode.
*
* What's more, libusb may also need to handle events at specific moments in
* time. No file descriptor activity is generated at these times, so your
* own application needs to be continually aware of when the next one of these
* moments occurs (through calling libusb_get_next_timeout()), and then it
* needs to call libusb_handle_events_timeout() in non-blocking mode when
* these moments occur. This means that you need to adjust your
* poll()/select() timeout accordingly.
*
* libusb provides you with a set of file descriptors to poll and expects you
* to poll all of them, treating them as a single entity. The meaning of each
* file descriptor in the set is an internal implementation detail,
* platform-dependent and may vary from release to release. Don't try and
* interpret the meaning of the file descriptors, just do as libusb indicates,
* polling all of them at once.
*
* In pseudo-code, you want something that looks like:
\code
// initialise libusb
libusb_get_pollfds(ctx)
while (user has not requested application exit) {
libusb_get_next_timeout(ctx);
poll(on libusb file descriptors plus any other event sources of interest,
using a timeout no larger than the value libusb just suggested)
if (poll() indicated activity on libusb file descriptors)
libusb_handle_events_timeout(ctx, &zero_tv);
if (time has elapsed to or beyond the libusb timeout)
libusb_handle_events_timeout(ctx, &zero_tv);
// handle events from other sources here
}
// clean up and exit
\endcode
*
* \subsection polltime Notes on time-based events
*
* The above complication with having to track time and call into libusb at
* specific moments is a bit of a headache. For maximum compatibility, you do
* need to write your main loop as above, but you may decide that you can
* restrict the supported platforms of your application and get away with
* a more simplistic scheme.
*
* These time-based event complications are \b not required on the following
* platforms:
* - Darwin
* - Linux, provided that the following version requirements are satisfied:
* - Linux v2.6.27 or newer, compiled with timerfd support
* - glibc v2.9 or newer
* - libusb v1.0.5 or newer
*
* Under these configurations, libusb_get_next_timeout() will \em always return
* 0, so your main loop can be simplified to:
\code
// initialise libusb
libusb_get_pollfds(ctx)
while (user has not requested application exit) {
poll(on libusb file descriptors plus any other event sources of interest,
using any timeout that you like)
if (poll() indicated activity on libusb file descriptors)
libusb_handle_events_timeout(ctx, &zero_tv);
// handle events from other sources here
}
// clean up and exit
\endcode
*
* Do remember that if you simplify your main loop to the above, you will
* lose compatibility with some platforms (including legacy Linux platforms,
* and <em>any future platforms supported by libusb which may have time-based
* event requirements</em>). The resultant problems will likely appear as
* strange bugs in your application.
*
* You can use the libusb_pollfds_handle_timeouts() function to do a runtime
* check to see if it is safe to ignore the time-based event complications.
* If your application has taken the shortcut of ignoring libusb's next timeout
* in your main loop, then you are advised to check the return value of
* libusb_pollfds_handle_timeouts() during application startup, and to abort
* if the platform does suffer from these timing complications.
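 *
 * A sketch of such a startup check:
\code
if (!libusb_pollfds_handle_timeouts(ctx)) {
	fprintf(stderr, "this platform requires timeout handling we do not implement\n");
	exit(1);
}
\endcode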
*
* \subsection fdsetchange Changes in the file descriptor set
*
* The set of file descriptors that libusb uses as event sources may change
* during the life of your application. Rather than having to repeatedly
* call libusb_get_pollfds(), you can set up notification functions for when
* the file descriptor set changes using libusb_set_pollfd_notifiers().
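 *
 * A sketch of this, where my_loop_add_fd() and my_loop_remove_fd() stand in
 * for whatever mechanism your own event loop uses to track descriptors:
\code
static void fd_added_cb(int fd, short events, void *user_data)
{
	my_loop_add_fd(fd, events);
}

static void fd_removed_cb(int fd, void *user_data)
{
	my_loop_remove_fd(fd);
}

// after obtaining the initial set with libusb_get_pollfds():
libusb_set_pollfd_notifiers(ctx, fd_added_cb, fd_removed_cb, NULL);
\endcode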
*
* \subsection mtissues Multi-threaded considerations
*
* Unfortunately, the situation is complicated further when multiple threads
* come into play. If two threads are monitoring the same file descriptors,
* the fact that only one thread will be woken up when an event occurs causes
* some headaches.
*
* The events lock, event waiters lock, and libusb_handle_events_locked()
* entities are added to solve these problems. You do not need to be concerned
* with these entities otherwise.
*
* See the extra documentation: \ref libusb_mtasync
*/
/** \page libusb_mtasync Multi-threaded applications and asynchronous I/O
*
* libusb is a thread-safe library, but extra considerations must be applied
* to applications which interact with libusb from multiple threads.
*
* The underlying issue that must be addressed is that all libusb I/O
* revolves around monitoring file descriptors through the poll()/select()
* system calls. This is directly exposed at the
* \ref libusb_asyncio "asynchronous interface" but it is important to note that the
* \ref libusb_syncio "synchronous interface" is implemented on top of the
* asynchronous interface, therefore the same considerations apply.
*
* The issue is that if two or more threads are concurrently calling poll()
* or select() on libusb's file descriptors then only one of those threads
* will be woken up when an event arrives. The others will be completely
* oblivious that anything has happened.
*
* Consider the following pseudo-code, which submits an asynchronous transfer
* then waits for its completion. This style is one way you could implement a
* synchronous interface on top of the asynchronous interface (and libusb
* does something similar, albeit more advanced due to the complications
* explained on this page).
*
\code
void cb(struct libusb_transfer *transfer)
{
int *completed = transfer->user_data;
*completed = 1;
}
void myfunc() {
struct libusb_transfer *transfer;
unsigned char buffer[LIBUSB_CONTROL_SETUP_SIZE] __attribute__ ((aligned (2)));
int completed = 0;
transfer = libusb_alloc_transfer(0);
libusb_fill_control_setup(buffer,
LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_OUT, 0x04, 0x01, 0, 0);
libusb_fill_control_transfer(transfer, dev, buffer, cb, &completed, 1000);
libusb_submit_transfer(transfer);
while (!completed) {
poll(libusb file descriptors, 120*1000);
if (poll indicates activity)
libusb_handle_events_timeout(ctx, &zero_tv);
}
printf("completed!");
// other code here
}
\endcode
*
* Here we are <em>serializing</em> completion of an asynchronous event
* against a condition - the condition being completion of a specific transfer.
* The poll() loop has a long timeout to minimize CPU usage during situations
* when nothing is happening (it could reasonably be unlimited).
*
* If this is the only thread that is polling libusb's file descriptors, there
* is no problem: there is no danger that another thread will swallow up the
* event that we are interested in. On the other hand, if there is another
* thread polling the same descriptors, there is a chance that it will receive
* the event that we were interested in. In this situation, <tt>myfunc()</tt>
* will only realise that the transfer has completed on the next iteration of
* the loop, <em>up to 120 seconds later.</em> Clearly a two-minute delay is
* undesirable, and don't even think about using short timeouts to circumvent
* this issue!
*
* The solution here is to ensure that no two threads are ever polling the
* file descriptors at the same time. A naive implementation of this would
* impact the capabilities of the library, so libusb offers the scheme
* documented below to ensure no loss of functionality.
*
* Before we go any further, it is worth mentioning that all libusb-wrapped
* event handling procedures fully adhere to the scheme documented below.
* This includes libusb_handle_events() and its variants, and all the
* synchronous I/O functions - libusb hides this headache from you.
*
* \section Using libusb_handle_events() from multiple threads
*
* Even when only using libusb_handle_events() and synchronous I/O functions,
* you can still have a race condition. You might be tempted to solve the
* above with libusb_handle_events() like so:
*
\code
libusb_submit_transfer(transfer);
while (!completed) {
libusb_handle_events(ctx);
}
printf("completed!");
\endcode
*
* This however has a race between the checking of completed and
* libusb_handle_events() acquiring the events lock, so another thread
* could have completed the transfer, resulting in this thread hanging
* until either a timeout or another event occurs. See also commit
* 6696512aade99bb15d6792af90ae329af270eba6 which fixes this in the
* synchronous API implementation of libusb.
*
* Fixing this race requires checking the variable completed only after
* taking the event lock, which defeats the concept of just calling
* libusb_handle_events() without worrying about locking. This is why
* libusb-1.0.9 introduces the new libusb_handle_events_timeout_completed()
 * and libusb_handle_events_completed() functions, which handle the
* completion check for you after they have acquired the lock:
*
\code
libusb_submit_transfer(transfer);
while (!completed) {
libusb_handle_events_completed(ctx, &completed);
}
printf("completed!");
\endcode
*
* This nicely fixes the race in our example. Note that if all you want to
* do is submit a single transfer and wait for its completion, then using
* one of the synchronous I/O functions is much easier.
*
* \note
* The `completed` variable must be modified while holding the event lock,
* otherwise a race condition can still exist. It is simplest to do so from
* within the transfer callback as shown above.
*
* \section eventlock The events lock
*
* The problem is when we consider the fact that libusb exposes file
* descriptors to allow for you to integrate asynchronous USB I/O into
* existing main loops, effectively allowing you to do some work behind
* libusb's back. If you do take libusb's file descriptors and pass them to
* poll()/select() yourself, you need to be aware of the associated issues.
*
* The first concept to be introduced is the events lock. The events lock
* is used to serialize threads that want to handle events, such that only
* one thread is handling events at any one time.
*
* You must take the events lock before polling libusb file descriptors,
* using libusb_lock_events(). You must release the lock as soon as you have
* aborted your poll()/select() loop, using libusb_unlock_events().
*
* \section threadwait Letting other threads do the work for you
*
* Although the events lock is a critical part of the solution, it is not
 * enough on its own. You might wonder if the following is sufficient...
\code
libusb_lock_events(ctx);
while (!completed) {
poll(libusb file descriptors, 120*1000);
if (poll indicates activity)
libusb_handle_events_timeout(ctx, &zero_tv);
}
libusb_unlock_events(ctx);
\endcode
* ...and the answer is that it is not. This is because the transfer in the
* code shown above may take a long time (say 30 seconds) to complete, and
* the lock is not released until the transfer is completed.
*
* Another thread with similar code that wants to do event handling may be
* working with a transfer that completes after a few milliseconds. Despite
* having such a quick completion time, the other thread cannot check that
* status of its transfer until the code above has finished (30 seconds later)
* due to contention on the lock.
*
* To solve this, libusb offers you a mechanism to determine when another
* thread is handling events. It also offers a mechanism to block your thread
* until the event handling thread has completed an event (and this mechanism
* does not involve polling of file descriptors).
*
* After determining that another thread is currently handling events, you
* obtain the <em>event waiters</em> lock using libusb_lock_event_waiters().
* You then re-check that some other thread is still handling events, and if
* so, you call libusb_wait_for_event().
*
* libusb_wait_for_event() puts your application to sleep until an event
* occurs, or until a thread releases the events lock. When either of these
* things happen, your thread is woken up, and should re-check the condition
* it was waiting on. It should also re-check that another thread is handling
* events, and if not, it should start handling events itself.
*
* This looks like the following, as pseudo-code:
\code
retry:
if (libusb_try_lock_events(ctx) == 0) {
// we obtained the event lock: do our own event handling
while (!completed) {
if (!libusb_event_handling_ok(ctx)) {
libusb_unlock_events(ctx);
goto retry;
}
poll(libusb file descriptors, 120*1000);
if (poll indicates activity)
libusb_handle_events_locked(ctx, 0);
}
libusb_unlock_events(ctx);
} else {
// another thread is doing event handling. wait for it to signal us that