  1. /*
  2. * drivers/usb/gadget/f_mtp_samsung.c
  3. *
  4. * Function Driver for USB MTP,
  5. * f_mtp_samsung.c -- MTP Driver, for MTP development,
  6. *
  7. * Copyright (C) 2009 by Samsung Electronics,
  8. * Author:Deepak M.G. <deepak.guru@samsung.com>,
  9. * Author:Madhukar.J <madhukar.j@samsung.com>,
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. */
  21.  
  22. /*
  23. * f_mtp_samsung.c is the driver for the MTP gadget function. Three
  24. * endpoints are configured in total: two bulk endpoints and one
  25. * interrupt endpoint. The driver also registers a misc device and
  26. * exposes file operations to user space.
  27. */
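/*
 * Illustrative user-space usage (a minimal sketch, not part of this driver;
 * the ioctl codes come from f_mtp.h and the exact sequence depends on the
 * platform's MTP daemon):
 *
 *   int fd = open("/dev/usb_mtp_gadget", O_RDWR);   // misc device node
 *   ioctl(fd, SET_MTP_USER_PID, getpid());          // register for setup signals
 *   read(fd, buf, sizeof(buf));                     // receive an MTP container
 *   write(fd, resp, resp_len);                      // send the response data
 *   close(fd);
 */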
  28.  
  29. /* Includes */
  30. #include <linux/module.h>
  31. #include <linux/init.h>
  32. #include <linux/poll.h>
  33. #include <linux/delay.h>
  34. #include <linux/wait.h>
  35. #include <linux/err.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/types.h>
  38. #include <linux/device.h>
  39. #include <linux/miscdevice.h>
  40. #include <linux/kernel.h>
  41. #include <linux/kref.h>
  42. #include <linux/spinlock.h>
  43. #include <linux/string.h>
  44. #include <linux/usb.h>
  45. #include <linux/usb_usual.h>
  46. #include <linux/usb/ch9.h>
  47. #include <linux/usb/composite.h>
  48. #include <linux/usb/gadget.h>
  49. #include <linux/hardirq.h>
  50. #include <linux/sched.h>
  51. #include <linux/usb/f_accessory.h>
  52. #include <asm-generic/siginfo.h>
  53. #include <linux/usb/android_composite.h>
  54. #include <linux/kernel.h>
  55. #include "f_mtp.h"
  56. #include "gadget_chips.h"
  57.  
  58. /*-------------------------------------------------------------------------*/
  59. /*Only for Debug*/
  60. #define DEBUG_MTP 0
  61. /*#define CSY_TEST */
  62.  
  63. #if DEBUG_MTP
  64. #define DEBUG_MTP_SETUP
  65. #define DEBUG_MTP_READ
  66. #define DEBUG_MTP_WRITE
  67.  
  68. #else
  69. #undef DEBUG_MTP_SETUP
  70. #undef DEBUG_MTP_READ
  71. #undef DEBUG_MTP_WRITE
  72. #endif
  73.  
  74. /*#define DEBUG_MTP_SETUP*/
  75. /*#define DEBUG_MTP_READ*/
  76. /*#define DEBUG_MTP_WRITE*/
  77.  
  78. #ifdef DEBUG_MTP_SETUP
  79. #define DEBUG_MTPB(fmt, args...) printk(fmt, ##args)
  80. #else
  81. #define DEBUG_MTPB(fmt, args...) do {} while (0)
  82. #endif
  83.  
  84. #ifdef DEBUG_MTP_READ
  85. #define DEBUG_MTPR(fmt, args...) printk(fmt, ##args)
  86. #else
  87. #define DEBUG_MTPR(fmt, args...) do {} while (0)
  88. #endif
  89. #ifdef DEBUG_MTP_WRITE
  90. #define DEBUG_MTPW(fmt, args...) printk(fmt, ##args)
  91. #else
  92. #define DEBUG_MTPW(fmt, args...) do {} while (0)
  93. #endif
  94. /*-------------------------------------------------------------------------*/
  95.  
  96. #define MTPG_BULK_BUFFER_SIZE 32768
  97. #define MTPG_INTR_BUFFER_SIZE 28
  98.  
  99. /* number of rx and tx requests to allocate */
  100. #define MTPG_RX_REQ_MAX 8
  101. #define MTPG_TX_REQ_MAX 8
  102. #define MTPG_INTR_REQ_MAX 5
  103.  
  104. /* ID for Microsoft MTP OS String */
  105. #define MTPG_OS_STRING_ID 0xEE
  106.  
  107. #define DRIVER_NAME "usb_mtp_gadget"
  108.  
  109. static const char mtpg_longname[] = "mtp";
  110. static const char shortname[] = DRIVER_NAME;
  111. static int mtp_pid;
  112.  
  113. /* MTP Device Structure*/
  114. struct mtpg_dev {
  115. struct usb_function function;
  116. struct usb_composite_dev *cdev;
  117. struct usb_gadget *gadget;
  118.  
  119. spinlock_t lock;
  120.  
  121. u8 config;
  122. int online;
  123. int error;
  124. int read_ready;
  125. struct list_head tx_idle;
  126. struct list_head rx_idle;
  127. struct list_head rx_done;
  128. struct list_head intr_idle;
  129. wait_queue_head_t read_wq;
  130. wait_queue_head_t write_wq;
  131. wait_queue_head_t intr_wq;
  132.  
  133. struct usb_request *read_req;
  134. unsigned char *read_buf;
  135. unsigned read_count;
  136.  
  137. struct usb_ep *bulk_in;
  138. struct usb_ep *bulk_out;
  139. struct usb_ep *int_in;
  140. struct usb_request *notify_req;
  141.  
  142. struct workqueue_struct *wq;
  143. struct work_struct read_send_work;
  144. struct file *read_send_file;
  145.  
  146. int64_t read_send_length;
  147.  
  148. uint16_t read_send_cmd;
  149. uint32_t read_send_id;
  150. int read_send_result;
  151. atomic_t read_excl;
  152. atomic_t write_excl;
  153. atomic_t ioctl_excl;
  154. atomic_t open_excl;
  155. atomic_t wintfd_excl;
  156. char cancel_io_buf[USB_PTPREQUEST_CANCELIO_SIZE+1];
  157. int cancel_io;
  158. };
  159.  
  160. /* Global mtpg_dev structure;
  161. * the_mtpg is shared between mtpg_open() and mtpg_function_bind() */
  162. static struct mtpg_dev *the_mtpg;
  163.  
  164. /* Three full-speed and high-speed endpoint descriptors: bulk-in, bulk-out,
  165. * and interrupt-in. */
  166.  
  167. struct usb_interface_descriptor mtpg_interface_desc = {
  168. .bLength = USB_DT_INTERFACE_SIZE,
  169. .bDescriptorType = USB_DT_INTERFACE,
  170. .bInterfaceNumber = 0,
  171. .bNumEndpoints = 3,
  172. .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
  173. .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
  174. .bInterfaceProtocol = 0,
  175. };
  176.  
  177. static struct usb_interface_descriptor ptp_interface_desc = {
  178. .bLength = USB_DT_INTERFACE_SIZE,
  179. .bDescriptorType = USB_DT_INTERFACE,
  180. .bInterfaceNumber = 0,
  181. .bNumEndpoints = 3,
  182. .bInterfaceClass = USB_CLASS_STILL_IMAGE,
  183. .bInterfaceSubClass = 1,
  184. .bInterfaceProtocol = 1,
  185. };
  186.  
  187. static struct usb_endpoint_descriptor fs_mtpg_in_desc = {
  188. .bLength = USB_DT_ENDPOINT_SIZE,
  189. .bDescriptorType = USB_DT_ENDPOINT,
  190. .bEndpointAddress = USB_DIR_IN,
  191. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  192. /* wMaxPacketSize set by autoconfiguration */
  193. };
  194.  
  195. static struct usb_endpoint_descriptor fs_mtpg_out_desc = {
  196. .bLength = USB_DT_ENDPOINT_SIZE,
  197. .bDescriptorType = USB_DT_ENDPOINT,
  198. .bEndpointAddress = USB_DIR_OUT,
  199. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  200. /* wMaxPacketSize set by autoconfiguration */
  201. };
  202.  
  203. static struct usb_endpoint_descriptor int_fs_notify_desc = {
  204. .bLength = USB_DT_ENDPOINT_SIZE,
  205. .bDescriptorType = USB_DT_ENDPOINT,
  206. .bEndpointAddress = USB_DIR_IN,
  207. .bmAttributes = USB_ENDPOINT_XFER_INT,
  208. .wMaxPacketSize = __constant_cpu_to_le16(MTPG_INTR_BUFFER_SIZE),
  209. .bInterval = 6,
  210. };
  211.  
  212. static struct usb_descriptor_header *fs_mtpg_desc[] = {
  213. (struct usb_descriptor_header *) &mtpg_interface_desc,
  214. (struct usb_descriptor_header *) &fs_mtpg_in_desc,
  215. (struct usb_descriptor_header *) &fs_mtpg_out_desc,
  216. (struct usb_descriptor_header *) &int_fs_notify_desc,
  217. NULL,
  218. };
  219.  
  220. static struct usb_endpoint_descriptor hs_mtpg_in_desc = {
  221. .bLength = USB_DT_ENDPOINT_SIZE,
  222. .bDescriptorType = USB_DT_ENDPOINT,
  223. /*bEndpointAddress copied from fs_mtpg_in_desc
  224. during mtpg_function_bind()*/
  225. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  226. .wMaxPacketSize = __constant_cpu_to_le16(512),
  227. };
  228.  
  229. static struct usb_endpoint_descriptor hs_mtpg_out_desc = {
  230. .bLength = USB_DT_ENDPOINT_SIZE,
  231. .bDescriptorType = USB_DT_ENDPOINT,
  232. /*bEndpointAddress copied from fs_mtpg_out_desc
  233. during mtpg_function_bind()*/
  234. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  235. .wMaxPacketSize = __constant_cpu_to_le16(512),
  236. .bInterval = 1, /* NAK every 1 uframe */
  237. };
  238.  
  239. static struct usb_endpoint_descriptor int_hs_notify_desc = {
  240. .bLength = USB_DT_ENDPOINT_SIZE,
  241. .bDescriptorType = USB_DT_ENDPOINT,
  242. .bEndpointAddress = USB_DIR_IN,
  243. .bmAttributes = USB_ENDPOINT_XFER_INT,
  244. .wMaxPacketSize = __constant_cpu_to_le16(MTPG_INTR_BUFFER_SIZE),
  245. .bInterval = 6,
  246. };
  247.  
  248. static struct usb_descriptor_header *hs_mtpg_desc[] = {
  249. (struct usb_descriptor_header *) &mtpg_interface_desc,
  250. (struct usb_descriptor_header *) &hs_mtpg_in_desc,
  251. (struct usb_descriptor_header *) &hs_mtpg_out_desc,
  252. (struct usb_descriptor_header *) &int_hs_notify_desc,
  253. NULL
  254. };
  255.  
  256. static struct usb_descriptor_header *fs_ptp_descs[] = {
  257. (struct usb_descriptor_header *) &ptp_interface_desc,
  258. (struct usb_descriptor_header *) &fs_mtpg_in_desc,
  259. (struct usb_descriptor_header *) &fs_mtpg_out_desc,
  260. (struct usb_descriptor_header *) &int_fs_notify_desc,
  261. NULL,
  262. };
  263.  
  264. static struct usb_descriptor_header *hs_ptp_descs[] = {
  265. (struct usb_descriptor_header *) &ptp_interface_desc,
  266. (struct usb_descriptor_header *) &hs_mtpg_in_desc,
  267. (struct usb_descriptor_header *) &hs_mtpg_out_desc,
  268. (struct usb_descriptor_header *) &int_hs_notify_desc,
  269. NULL,
  270. };
  271.  
  272. /* string IDs are assigned dynamically */
  273. #define F_MTP_IDX 0
  274. #define STRING_PRODUCT_IDX 1
  275. #define STRING_SERIAL_IDX 2
  276.  
  277. /* default serial number takes at least two packets */
  278. static const char serial[] = "0123456789.0123456789.0123456789";
  279.  
  280. static struct usb_string strings_dev_mtp[] = {
  281. [F_MTP_IDX].s = "MTP",
  282. [STRING_PRODUCT_IDX].s = mtpg_longname,
  283. [STRING_SERIAL_IDX].s = serial,
  284. { }, /* end of list */
  285. };
  286.  
  287. static struct usb_gadget_strings stringtab_mtp = {
  288. .language = 0x0409, /* en-us */
  289. .strings = strings_dev_mtp,
  290. };
  291.  
  292. static struct usb_gadget_strings *mtpg_dev_strings[] = {
  293. &stringtab_mtp,
  294. NULL,
  295. };
  296.  
  297. /* Microsoft MTP OS String */
  298. static u8 mtpg_os_string[] = {
  299. 18, /* sizeof(mtpg_os_string) */
  300. USB_DT_STRING,
  301. /* Signature field: "MSFT100" */
  302. 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
  303. /* vendor code */
  304. 1,
  305. /* padding */
  306. 0
  307. };
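/*
 * Note: the host discovers Microsoft OS descriptor support by reading string
 * descriptor index 0xEE (MTPG_OS_STRING_ID). The "MSFT100" signature plus the
 * vendor code (1) tells Windows to issue a vendor-specific request with that
 * bRequest and wIndex 0x0004, which mtp_ctrlrequest() below answers with
 * mtpg_ext_config_desc.
 */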
  308.  
  309. /* Microsoft Extended Configuration Descriptor Header Section */
  310. struct mtpg_ext_config_desc_header {
  311. __le32 dwLength;
  312. __le16 bcdVersion;
  313. __le16 wIndex;
  314. __u8 bCount;
  315. __u8 reserved[7];
  316. };
  317.  
  318. /* Microsoft Extended Configuration Descriptor Function Section */
  319. struct mtpg_ext_config_desc_function {
  320. __u8 bFirstInterfaceNumber;
  321. __u8 bInterfaceCount;
  322. __u8 compatibleID[8];
  323. __u8 subCompatibleID[8];
  324. __u8 reserved[6];
  325. };
  326.  
  327. /* MTP Extended Configuration Descriptor */
  328. struct {
  329. struct mtpg_ext_config_desc_header header;
  330. struct mtpg_ext_config_desc_function function;
  331. } mtpg_ext_config_desc = {
  332. .header = {
  333. .dwLength = __constant_cpu_to_le32
  334. (sizeof(mtpg_ext_config_desc)),
  335. .bcdVersion = __constant_cpu_to_le16(0x0100),
  336. .wIndex = __constant_cpu_to_le16(4),
  337. .bCount = 1,
  338. },
  339. .function = {
  340. .bFirstInterfaceNumber = 0,
  341. .bInterfaceCount = 1,
  342. .compatibleID = { 'M', 'T', 'P' },
  343. },
  344. };
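/*
 * The compatibleID "MTP" (NUL-padded to 8 bytes) is what lets a Windows host
 * bind its in-box MTP class driver to this vendor-specific interface without
 * a separate INF file.
 */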
  345.  
  346. /* Function : Change config for multi configuration
  347. * Parameter : int conf_num (config number)
  348. * 0 - use MTP only, without the Samsung USB driver
  349. * 1 - use MTP + ACM, with the Samsung USB driver
  350. * Description
  351. * This function implements the Samsung multi-configuration
  352. * feature (by Soonyong Cho).
  353. * Register it as the function's set_config_desc handler.
  354. * Date : 2011-08-03
  355. */
  356. static int mtp_set_config_desc(int conf_num)
  357. {
  358. switch (conf_num) {
  359. case 0:
  360. mtpg_interface_desc.bInterfaceClass =
  361. USB_CLASS_VENDOR_SPEC;
  362. mtpg_interface_desc.bInterfaceSubClass =
  363. USB_SUBCLASS_VENDOR_SPEC;
  364. mtpg_interface_desc.bInterfaceProtocol =
  365. 0x0;
  366. break;
  367. case 1:
  368. mtpg_interface_desc.bInterfaceClass =
  369. USB_CLASS_STILL_IMAGE;
  370. mtpg_interface_desc.bInterfaceSubClass =
  371. 0x01;
  372. mtpg_interface_desc.bInterfaceProtocol =
  373. 0x01;
  374. break;
  375.  
  376. }
  377. return 1;
  378. }
  379.  
  380. /* -------------------------------------------------------------------------
  381. * Main Functionalities Start!
  382. * ------------------------------------------------------------------------- */
  383. static inline struct mtpg_dev *mtpg_func_to_dev(struct usb_function *f)
  384. {
  385. return container_of(f, struct mtpg_dev, function);
  386. }
  387.  
  388. static inline int _lock(atomic_t *excl)
  389. {
  390.  
  391. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  392.  
  393. if (atomic_inc_return(excl) == 1) {
  394. return 0;
  395. } else {
  396. atomic_dec(excl);
  397. return -1;
  398. }
  399. }
  400.  
  401. static inline void _unlock(atomic_t *excl)
  402. {
  403. atomic_dec(excl);
  404. }
  405.  
  406. /* add a request to the tail of a list */
  407. static void mtpg_req_put(struct mtpg_dev *dev, struct list_head *head,
  408. struct usb_request *req)
  409. {
  410. unsigned long flags;
  411.  
  412. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  413.  
  414. spin_lock_irqsave(&dev->lock, flags);
  415. list_add_tail(&req->list, head);
  416. spin_unlock_irqrestore(&dev->lock, flags);
  417. }
  418.  
  419. /* remove a request from the head of a list */
  420. static struct usb_request *mtpg_req_get(struct mtpg_dev *dev,
  421. struct list_head *head)
  422. {
  423. unsigned long flags;
  424. struct usb_request *req;
  425.  
  426. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  427.  
  428. spin_lock_irqsave(&dev->lock, flags);
  429. if (list_empty(head)) {
  430. req = 0;
  431. } else {
  432. req = list_first_entry(head, struct usb_request, list);
  433. list_del(&req->list);
  434. }
  435. spin_unlock_irqrestore(&dev->lock, flags);
  436.  
  437. return req;
  438. }
  439.  
  440. static int mtp_send_signal(int value)
  441. {
  442. int ret;
  443. struct siginfo info;
  444. struct task_struct *t;
  445. memset(&info, 0, sizeof(struct siginfo));
  446. info.si_signo = SIG_SETUP;
  447. info.si_code = SI_QUEUE;
  448. info.si_int = value;
  449. rcu_read_lock();
  450.  
  451. if (!current->nsproxy) {
  452. printk(KERN_DEBUG "process has gone\n");
  453. rcu_read_unlock();
  454. return -ENODEV;
  455. }
  456.  
  457. t = pid_task(find_vpid(mtp_pid), PIDTYPE_PID);
  458.  
  459. if (t == NULL) {
  460. printk(KERN_DEBUG "no such pid\n");
  461. rcu_read_unlock();
  462. return -ENODEV;
  463. }
  464.  
  465. rcu_read_unlock();
  466. /*send the signal*/
  467. ret = send_sig_info(SIG_SETUP, &info, t);
  468. if (ret < 0) {
  469. printk(KERN_ERR "[%s]error sending signal\n", __func__);
  470. return ret;
  471. }
  472. return 0;
  473.  
  474. }
  475.  
  476. static int mtpg_open(struct inode *ip, struct file *fp)
  477. {
  478. printk(KERN_DEBUG "[%s]\tline = [%d]\n", __func__, __LINE__);
  479.  
  480. if (_lock(&the_mtpg->open_excl)) {
  481. printk(KERN_ERR "mtpg_open fn mtpg device busy\n");
  482. return -EBUSY;
  483. }
  484.  
  485. fp->private_data = the_mtpg;
  486.  
  487. /* clear the error latch */
  488.  
  489. DEBUG_MTPB("[%s] mtpg_open and clearing the error = 0\n", __func__);
  490.  
  491. the_mtpg->error = 0;
  492.  
  493. return 0;
  494. }
  495.  
  496. static ssize_t mtpg_read(struct file *fp, char __user *buf,
  497. size_t count, loff_t *pos)
  498. {
  499. struct mtpg_dev *dev = fp->private_data;
  500. struct usb_request *req;
  501. int r = count, xfer;
  502. int ret;
  503.  
  504. DEBUG_MTPR("[%s] and count = (%d)\n", __func__, count);
  505.  
  506. if (_lock(&dev->read_excl))
  507. return -EBUSY;
  508.  
  509. while (!((dev->online || dev->error) && dev->read_ready)) {
  510. DEBUG_MTPR("[%s] and line is = %d\n", __func__, __LINE__);
  511. ret = wait_event_interruptible(dev->read_wq,
  512. ((dev->online || dev->error) && dev->read_ready));
  513. if (ret < 0) {
  514. _unlock(&dev->read_excl);
  515. printk(KERN_DEBUG "[%s]line is = %d,mtp_read ret<0\n",
  516. __func__, __LINE__);
  517. return ret;
  518. }
  519. }
  520.  
  521. while (count > 0) {
  522. DEBUG_MTPR("[%s] and line is = %d\n", __func__, __LINE__);
  523.  
  524. if (dev->error) {
  525. r = -EIO;
  526. printk(KERN_ERR "[%s]\t%d:dev->error so break r=%d\n",
  527. __func__, __LINE__, r);
  528. break;
  529. }
  530.  
  531. /* if we have idle read requests, get them queued */
  532. DEBUG_MTPR("[%s]\t%d: get request\n", __func__, __LINE__);
  533. while ((req = mtpg_req_get(dev, &dev->rx_idle))) {
  534. requeue_req:
  535. req->length = MTPG_BULK_BUFFER_SIZE;
  536. DEBUG_MTPR("[%s]\t%d:usb-ep-queue\n",
  537. __func__, __LINE__);
  538. ret = usb_ep_queue(dev->bulk_out, req, GFP_ATOMIC);
  539.  
  540. DEBUG_MTPR("[%s]\t%d:Endpoint: %s\n",
  541. __func__, __LINE__, dev->bulk_out->name);
  542.  
  543. if (ret < 0) {
  544. r = -EIO;
  545. dev->error = 1;
  546. mtpg_req_put(dev, &dev->rx_idle, req);
  547. printk(KERN_ERR "[%s]line[%d]FAIL r=%d\n",
  548. __func__, __LINE__, r);
  549. goto fail;
  550. } else {
  551. DEBUG_MTPR("[%s]rx req queue%p\n",
  552. __func__, req);
  553. }
  554. }
  555.  
  556. DEBUG_MTPR("[%s]\t%d:read_count = %d\n",
  557. __func__, __LINE__, dev->read_count);
  558.  
  559. /* if we have data pending, give it to userspace */
  560. if (dev->read_count > 0) {
  561. DEBUG_MTPR("[%s]\t%d: read_count = %d\n",
  562. __func__, __LINE__, dev->read_count);
  563. if (dev->read_count < count)
  564. xfer = dev->read_count;
  565. else
  566. xfer = count;
  567.  
  568. DEBUG_MTPR("[%s]copy_to_user 0x%x bytes on EP %p\n",
  569. __func__, dev->read_count, dev->bulk_out);
  570.  
  571. if (copy_to_user(buf, dev->read_buf, xfer)) {
  572. r = -EFAULT;
  573. printk(KERN_ERR "[%s]%d:copy_to_user fail r=%d\n",
  574. __func__, __LINE__, r);
  575. break;
  576. }
  577.  
  578. dev->read_buf += xfer;
  579. dev->read_count -= xfer;
  580. buf += xfer;
  581. count -= xfer;
  582.  
  583. /* if we've emptied the buffer, release the request */
  584. if (dev->read_count == 0) {
  585. DEBUG_MTPR("[%s] and line is = %d\n",
  586. __func__, __LINE__);
  587. mtpg_req_put(dev, &dev->rx_idle, dev->read_req);
  588. dev->read_req = 0;
  589. }
  590.  
  591. /* Update the buffer size and return
  592. from mtpg_read */
  593. r = xfer;
  594. DEBUG_MTPR("[%s] \t %d: returning length %d\n",
  595. __func__, __LINE__, r);
  596. goto fail;
  597. }
  598.  
  599. /* wait for a request to complete */
  600. req = 0;
  601. DEBUG_MTPR("[%s] and line is = %d\n", __func__, __LINE__);
  602. ret = wait_event_interruptible(dev->read_wq,
  603. ((req = mtpg_req_get(dev, &dev->rx_done))
  604. || dev->error));
  605. DEBUG_MTPR("[%s]\t%d: dev->error %d and req = %p\n",
  606. __func__, __LINE__, dev->error, req);
  607.  
  608. if (req != 0) {
  609. /* if we got a 0-len one we need to put it back into
  610. ** service. if we made it the current read req we'd
  611. ** be stuck forever
  612. */
  613. if (req->actual == 0)
  614. goto requeue_req;
  615.  
  616. dev->read_req = req;
  617. dev->read_count = req->actual;
  618. dev->read_buf = req->buf;
  619.  
  620. DEBUG_MTPR("[%s]\t%d: rx_req=%p req->actual=%d\n",
  621. __func__, __LINE__, req, req->actual);
  622. }
  623.  
  624. if (ret < 0) {
  625. r = ret;
  626. printk(KERN_DEBUG "[%s]\t%d after ret=%d brk ret=%d\n",
  627. __func__, __LINE__, ret, r);
  628. break;
  629. }
  630. }
  631.  
  632. fail:
  633. _unlock(&dev->read_excl);
  634. DEBUG_MTPR("[%s]\t%d: RETURNING Back to USpace r=%d\n",
  635. __func__, __LINE__, r);
  636. return r;
  637.  
  638. }
  639.  
  640. static ssize_t mtpg_write(struct file *fp, const char __user *buf,
  641. size_t count, loff_t *pos)
  642. {
  643. struct mtpg_dev *dev = fp->private_data;
  644. struct usb_request *req = 0;
  645. int r = count, xfer;
  646. int ret;
  647.  
  648.  
  649. DEBUG_MTPW("[%s] \t%d ep bulk_out name = %s\n",
  650. __func__, __LINE__ , dev->bulk_out->name);
  651.  
  652. if (_lock(&dev->write_excl))
  653. return -EBUSY;
  654.  
  655. while (count > 0) {
  656. if (dev->error) {
  657. r = -EIO;
  658. printk(KERN_DEBUG "[%s]%d count>0 dev->error so brk\n",
  659. __func__, __LINE__);
  660. break;
  661. }
  662.  
  663. /* get an idle tx request to use */
  664. req = 0;
  665. ret = wait_event_interruptible(dev->write_wq,
  666. ((req = mtpg_req_get(dev, &dev->tx_idle))
  667. || dev->error));
  668.  
  669. if (ret < 0) {
  670. r = ret;
  671. printk(KERN_DEBUG "[%s]\t%d ret = %d\n",
  672. __func__, __LINE__, r);
  673. break;
  674. }
  675.  
  676. if (req != 0) {
  677. if (count > MTPG_BULK_BUFFER_SIZE)
  678. xfer = MTPG_BULK_BUFFER_SIZE;
  679. else
  680. xfer = count;
  681.  
  682. DEBUG_MTPW("[%s]\t%d copy_from_user length %d\n",
  683. __func__, __LINE__, xfer);
  684.  
  685. if (copy_from_user(req->buf, buf, xfer)) {
  686. printk(KERN_ERR "mtpg_write copy_from_user error\n");
  687. r = -EFAULT;
  688. break;
  689. }
  690.  
  691. req->length = xfer;
  692. ret = usb_ep_queue(dev->bulk_in, req, GFP_ATOMIC);
  693. if (ret < 0) {
  694. dev->error = 1;
  695. r = -EIO;
  696. printk(KERN_ERR "[%s]\t%d ep_que ret=%d brk ret=%d\n",
  697. __func__, __LINE__, ret, r);
  698. break;
  699. }
  700.  
  701. buf += xfer;
  702. count -= xfer;
  703.  
  704. /* zero this so we don't try to free it on error exit */
  705. req = 0;
  706. }
  707. }
  708.  
  709. if (req) {
  710. DEBUG_MTPW("[%s] \t%d mtpg_req_put\n", __func__, __LINE__);
  711. mtpg_req_put(dev, &dev->tx_idle, req);
  712. }
  713.  
  714. _unlock(&dev->write_excl);
  715.  
  716. DEBUG_MTPW("[%s]\t%d RETURN back to USpace r=%d\n",
  717. __func__, __LINE__, r);
  718. return r;
  719. }
  720.  
  721. static void interrupt_complete(struct usb_ep *ep, struct usb_request *req)
  722. {
  723. printk(KERN_DEBUG "Finished Writing Interrupt Data\n");
  724. }
  725.  
  726. static ssize_t interrupt_write(struct file *fd,
  727. const char __user *buf, size_t count)
  728. {
  729. struct mtpg_dev *dev = fd->private_data;
  730. struct usb_request *req = 0;
  731. int ret;
  732.  
  733. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  734.  
  735. if (count > MTPG_INTR_BUFFER_SIZE)
  736. return -EINVAL;
  737.  
  738. ret = wait_event_interruptible_timeout(dev->intr_wq,
  739. (req = mtpg_req_get(dev, &dev->intr_idle)),
  740. msecs_to_jiffies(1000));
  741.  
  742. if (!req) {
  743. printk(KERN_ERR "[%s]Alloc has failed\n", __func__);
  744. return -ENOMEM;
  745. }
  746.  
  747. if (copy_from_user(req->buf, buf, count)) {
  748. mtpg_req_put(dev, &dev->intr_idle, req);
  749. printk(KERN_ERR "[%s]copy from user has failed\n", __func__);
  750. return -EIO;
  751. }
  752.  
  753. req->length = count;
  754. /*req->complete = interrupt_complete;*/
  755.  
  756. ret = usb_ep_queue(dev->int_in, req, GFP_ATOMIC);
  757.  
  758. if (ret) {
  759. printk(KERN_ERR "[%s:%d]\n", __func__, __LINE__);
  760. mtpg_req_put(dev, &dev->intr_idle, req);
  761. }
  762.  
  763. DEBUG_MTPB("[%s] \tline = [%d] returning ret is %d\n",
  764. __func__, __LINE__, ret);
  765. return ret;
  766. }
  767.  
  768. static void read_send_work(struct work_struct *work)
  769. {
  770. struct mtpg_dev *dev = container_of(work, struct mtpg_dev,
  771. read_send_work);
  772. struct usb_composite_dev *cdev = dev->cdev;
  773. struct usb_request *req = 0;
  774. struct usb_container_header *hdr;
  775. struct file *file;
  776. loff_t file_pos = 0;
  777. int64_t count = 0;
  778. int xfer = 0;
  779. int ret = -1;
  780. int hdr_length = 0;
  781. int r = 0;
  782. int ZLP_flag = 0;
  783.  
  784. /* read our parameters */
  785. smp_rmb();
  786. file = dev->read_send_file;
  787. count = dev->read_send_length;
  788. hdr_length = sizeof(struct usb_container_header);
  789. count += hdr_length;
  790.  
  791. printk(KERN_DEBUG "[%s:%d] offset=[%lld]\t length+header=[%lld]\n",
  792. __func__, __LINE__, file_pos, count);
  793.  
  794. /* A Zero Length Packet should be sent if the last transfer
  795. * size is equal to the max packet size.
  796. */
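/* For example, with maxpacket = 512: (count & 511) == 0 means the payload is
 * an exact multiple of the packet size, so the final bulk transfer ends on a
 * packet boundary and the host only sees end-of-data once a ZLP follows.
 */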
  797. if ((count & (dev->bulk_in->maxpacket - 1)) == 0)
  798. ZLP_flag = 1;
  799.  
  800. while (count > 0 || ZLP_flag) {
  801. /*Breaking the loop after sending Zero Length Packet*/
  802. if (count == 0)
  803. ZLP_flag = 0;
  804.  
  805. if (dev->cancel_io == 1) {
  806. dev->cancel_io = 0; /*reported to user space*/
  807. r = -EIO;
  808. printk(KERN_DEBUG "[%s]\t%d ret = %d\n",
  809. __func__, __LINE__, r);
  810. break;
  811. }
  812. /* get an idle tx request to use */
  813. req = 0;
  814. ret = wait_event_interruptible(dev->write_wq,
  815. ((req = mtpg_req_get(dev, &dev->tx_idle))
  816. || dev->error));
  817. if (ret < 0) {
  818. r = ret;
  819. printk(KERN_DEBUG "[%s]\t%d ret = %d\n",
  820. __func__, __LINE__, r);
  821. break;
  822. }
  823.  
  824. if (count > MTPG_BULK_BUFFER_SIZE)
  825. xfer = MTPG_BULK_BUFFER_SIZE;
  826. else
  827. xfer = count;
  828.  
  829. if (hdr_length) {
  830. hdr = (struct usb_container_header *)req->buf;
  831. hdr->Length = __cpu_to_le32(count);
  832. hdr->Type = __cpu_to_le16(2);
  833. hdr->Code = __cpu_to_le16(dev->read_send_cmd);
  834. hdr->TransactionID = __cpu_to_le32(dev->read_send_id);
  835. }
  836.  
  837. ret = vfs_read(file, req->buf + hdr_length,
  838. xfer - hdr_length, &file_pos);
  839. if (ret < 0) {
  840. r = ret;
  841. break;
  842. }
  843. xfer = ret + hdr_length;
  844. hdr_length = 0;
  845.  
  846. req->length = xfer;
  847. ret = usb_ep_queue(dev->bulk_in, req, GFP_KERNEL);
  848. if (ret < 0) {
  849. dev->error = 1;
  850. r = -EIO;
  851. printk(KERN_DEBUG "[%s]\t%d ret = %d\n",
  852. __func__, __LINE__, r);
  853. break;
  854. }
  855.  
  856. count -= xfer;
  857.  
  858. req = 0;
  859. }
  860.  
  861. if (req)
  862. mtpg_req_put(dev, &dev->tx_idle, req);
  863.  
  864. DEBUG_MTPB("[%s] \tline = [%d] \t r = [%d]\n", __func__, __LINE__, r);
  865.  
  866. dev->read_send_result = r;
  867. smp_wmb();
  868. }
  869.  
  870. static long mtpg_ioctl(struct file *fd, unsigned int code, unsigned long arg)
  871. {
  872. struct mtpg_dev *dev = fd->private_data;
  873. struct usb_composite_dev *cdev;
  874. struct usb_request *req;
  875. int status = 0;
  876. int size = 0;
  877. int ret_value = 0;
  878. int max_pkt = 0;
  879. char *buf_ptr = NULL;
  880. char buf[USB_PTPREQUEST_GETSTATUS_SIZE+1] = {0};
  881.  
  882. cdev = dev->cdev;
  883. if (!cdev) {
  884. printk(KERN_ERR "usb: %s cdev not ready\n", __func__);
  885. return -EAGAIN;
  886. }
  887. req = cdev->req;
  888. if (!cdev->req) {
  889. printk(KERN_ERR "usb: %s cdev->req not ready\n", __func__);
  890. return -EAGAIN;
  891. }
  892.  
  893. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  894.  
  895. switch (code) {
  896. case MTP_ONLY_ENABLE:
  897. printk(KERN_DEBUG "[%s:%d] MTP_ONLY_ENABLE ioctl:\n",
  898. __func__, __LINE__);
  899. if (dev->cdev && dev->cdev->gadget) {
  900. usb_gadget_disconnect(cdev->gadget);
  901. printk(KERN_DEBUG "[%s:%d] after usb_gadget_disconnect\n",
  902. __func__, __LINE__);
  903. msleep(20);
  904. usb_gadget_connect(cdev->gadget);
  905. printk(KERN_DEBUG "[%s:%d] after usb_gadget_connect\n",
  906. __func__, __LINE__);
  907. }
  908. status = 10;
  909. printk(KERN_DEBUG "[%s:%d] MTP_ONLY_ENABLE clearing error 0\n",
  910. __func__, __LINE__);
  911. the_mtpg->error = 0;
  912. break;
  913. case MTP_DISABLE:
  914. /*mtp_function_enable(mtp_disable_desc);*/
  915. if (dev->cdev && dev->cdev->gadget) {
  916. usb_gadget_disconnect(dev->cdev->gadget);
  917. mdelay(5);
  918. usb_gadget_connect(dev->cdev->gadget);
  919. }
  920. break;
  921. case MTP_CLEAR_HALT:
  922. status = usb_ep_clear_halt(dev->bulk_in);
  923. status = usb_ep_clear_halt(dev->bulk_out);
  924. break;
  925. case MTP_WRITE_INT_DATA:
  926. printk(KERN_INFO "[%s]\t%d MTP interrupt_write (no sleep)\n",
  927. __func__, __LINE__);
  928. ret_value = interrupt_write(fd, (const char *)arg,
  929. MTP_MAX_PACKET_LEN_FROM_APP);
  930. if (ret_value < 0) {
  931. printk(KERN_ERR "[%s]\t%d interrupt write failed\n",
  932. __func__, __LINE__);
  933. status = -EIO;
  934. } else {
  935. printk(KERN_DEBUG "[%s]\t%d interrupt write success\n",
  936. __func__, __LINE__);
  937. status = MTP_MAX_PACKET_LEN_FROM_APP;
  938. }
  939. break;
  940.  
  941. case SET_MTP_USER_PID:
  942. mtp_pid = arg;
  943. printk(KERN_DEBUG "[%s]SET_MTP_USER_PID;pid=%d\tline=[%d]\n",
  944. __func__, mtp_pid, __LINE__);
  945. break;
  946.  
  947. case GET_SETUP_DATA:
  948. buf_ptr = (char *)arg;
  949. printk(KERN_DEBUG "[%s] GET_SETUP_DATA\tline = [%d]\n",
  950. __func__, __LINE__);
  951. if (copy_to_user(buf_ptr, dev->cancel_io_buf,
  952. USB_PTPREQUEST_CANCELIO_SIZE)) {
  953. status = -EIO;
  954. printk(KERN_ERR "[%s]\t%d:copy_to_user failed\n",
  955. __func__, __LINE__);
  956. }
  957. break;
  958.  
  959. case SEND_RESET_ACK:
  960. /*req->zero = 1;*/
  961. req->length = 0;
  962. /*printk(KERN_DEBUG "[%s]SEND_RESET_ACK and usb_ep_queu
  963. ZERO data size = %d\tline=[%d]\n",
  964. __func__, size, __LINE__);*/
  965. status = usb_ep_queue(cdev->gadget->ep0,
  966. req, GFP_ATOMIC);
  967. if (status < 0)
  968. printk(KERN_ERR "[%s]ep_queue line = [%d]\n",
  969. __func__, __LINE__);
  970. break;
  971.  
  972. case SET_SETUP_DATA:
  973. buf_ptr = (char *)arg;
  974. if (copy_from_user(buf, buf_ptr,
  975. USB_PTPREQUEST_GETSTATUS_SIZE)) {
  976. status = -EIO;
  977. printk(KERN_ERR "[%s]\t%d:copy_from_user fail\n",
  978. __func__, __LINE__);
  979. break;
  980. }
  981. size = buf[0];
  982. printk(KERN_DEBUG "[%s]SET_SETUP_DATA size=%d line=[%d]\n",
  983. __func__, size, __LINE__);
  984. memcpy(req->buf, buf, size);
  985. req->zero = 0;
  986. req->length = size;
  987. status = usb_ep_queue(cdev->gadget->ep0, req,
  988. GFP_ATOMIC);
  989. if (status < 0)
  990. printk(KERN_ERR "[%s]usbepqueue line=[%d]\n",
  991. __func__, __LINE__);
  992. break;
  993.  
  994. case SET_ZLP_DATA:
  995. /*req->zero = 1;*/
  996. req = mtpg_req_get(dev, &dev->tx_idle);
  997. if (!req) {
  998. printk(KERN_DEBUG "[%s] Failed to get ZLP_DATA\n",
  999. __func__);
  1000. return -EAGAIN;
  1001. }
  1002. req->length = 0;
  1003. printk(KERN_DEBUG "[%s]ZLP_DATA data=%d\tline=[%d]\n",
  1004. __func__, size, __LINE__);
  1005. status = usb_ep_queue(dev->bulk_in, req, GFP_ATOMIC);
  1006. if (status < 0) {
  1007. printk(KERN_ERR "[%s]usbepqueue line=[%d]\n",
  1008. __func__, __LINE__);
  1009. } else {
  1010. printk(KERN_DEBUG "[%s] ZLP status=%d\tline=%d\n",
  1011. __func__, status, __LINE__);
  1012. status = 20;
  1013. }
  1014. break;
  1015.  
  1016. case GET_HIGH_FULL_SPEED:
  1017. printk(KERN_DEBUG "[%s]GET_HIGH_FULLSPEED line=[%d]\n",
  1018. __func__, __LINE__);
  1019. max_pkt = dev->bulk_in->maxpacket;
  1020. printk(KERN_DEBUG "[%s] line = %d max_pkt = [%d]\n",
  1021. __func__, __LINE__, max_pkt);
  1022. if (max_pkt == 64)
  1023. status = 64;
  1024. else
  1025. status = 512;
  1026. break;
  1027. case SEND_FILE_WITH_HEADER:
  1028. {
  1029. struct read_send_info info;
  1030. struct work_struct *work;
  1031. struct file *file = NULL;
  1032. printk(KERN_DEBUG "[%s]SEND_FILE_WITH_HEADER line=[%d]\n",
  1033. __func__, __LINE__);
  1034.  
  1035. if (copy_from_user(&info, (void __user *)arg, sizeof(info))) {
  1036. status = -EFAULT;
  1037. goto exit;
  1038. }
  1039.  
  1040. file = fget(info.Fd);
  1041. if (!file) {
  1042. status = -EBADF;
  1043. printk(KERN_DEBUG "[%s] line=[%d] bad file number\n",
  1044. __func__, __LINE__);
  1045. goto exit;
  1046. }
  1047.  
  1048. dev->read_send_file = file;
  1049. dev->read_send_length = info.Length;
  1050. smp_wmb();
  1051.  
  1052. work = &dev->read_send_work;
  1053. dev->read_send_cmd = info.Code;
  1054. dev->read_send_id = info.TransactionID;
  1055. queue_work(dev->wq, work);
  1056. /* Wait for the work to be completed on the work queue */
  1057. flush_workqueue(dev->wq);
  1058.  
  1059. fput(file);
  1060.  
  1061. smp_rmb();
  1062. status = dev->read_send_result;
  1063. break;
  1064. }
  1065. default:
  1066. status = -ENOTTY;
  1067. }
  1068. exit:
  1069. return status;
  1070. }
  1071.  
  1072. static int mtpg_release_device(struct inode *ip, struct file *fp)
  1073. {
  1074. printk(KERN_DEBUG "[%s]\tline = [%d]\n", __func__, __LINE__);
  1075. if (the_mtpg != NULL)
  1076. _unlock(&the_mtpg->open_excl);
  1077. return 0;
  1078. }
  1079.  
  1080. /* file operations for MTP device /dev/usb_mtp_gadget */
  1081. static const struct file_operations mtpg_fops = {
  1082. .owner = THIS_MODULE,
  1083. .read = mtpg_read,
  1084. .write = mtpg_write,
  1085. .open = mtpg_open,
  1086. .unlocked_ioctl = mtpg_ioctl,
  1087. .release = mtpg_release_device,
  1088. };
  1089.  
  1090. static struct miscdevice mtpg_device = {
  1091. .minor = MISC_DYNAMIC_MINOR,
  1092. .name = shortname,
  1093. .fops = &mtpg_fops,
  1094. };
  1095.  
  1096. struct usb_request *alloc_ep_req(struct usb_ep *ep,
  1097. unsigned len, gfp_t kmalloc_flags)
  1098. {
  1099. struct usb_request *req;
  1100.  
  1101. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  1102. req = usb_ep_alloc_request(ep, GFP_ATOMIC);
  1103. if (req) {
  1104. req->length = len;
  1105. req->buf = kmalloc(len, GFP_ATOMIC);
  1106. if (!req->buf) {
  1107. usb_ep_free_request(ep, req);
  1108. req = NULL;
  1109. }
  1110. }
  1111. return req;
  1112. }
  1113.  
  1114. static void mtpg_request_free(struct usb_request *req, struct usb_ep *ep)
  1115. {
  1116.  
  1117. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  1118. if (req) {
  1119. kfree(req->buf);
  1120. usb_ep_free_request(ep, req);
  1121. }
  1122. }
  1123.  
  1124. static struct usb_request *mtpg_request_new(struct usb_ep *ep, int buffer_size)
  1125. {
  1126.  
  1127. struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
  1128.  
  1129. DEBUG_MTPB("[%s] \tline = [%d]\n", __func__, __LINE__);
  1130. if (!req) {
  1131. printk(KERN_ERR "[%s]\tline %d ERROR\n", __func__, __LINE__);
  1132. return NULL;
  1133. }
  1134.  
  1135. /* now allocate buffers for the requests */
  1136. req->buf = kmalloc(buffer_size, GFP_KERNEL);
  1137. if (!req->buf) {
  1138. usb_ep_free_request(ep, req);
  1139. return NULL;
  1140. }
  1141.  
  1142. return req;
  1143. }
  1144.  
  1145. static void mtpg_complete_in(struct usb_ep *ep, struct usb_request *req)
  1146. {
  1147. struct mtpg_dev *dev = the_mtpg;
  1148. DEBUG_MTPB("[%s]\t %d req->status is = %d\n",
  1149. __func__, __LINE__, req->status);
  1150.  
  1151. if (req->status != 0)
  1152. dev->error = 1;
  1153.  
  1154. mtpg_req_put(dev, &dev->tx_idle, req);
  1155. wake_up(&dev->write_wq);
  1156. }
  1157.  
  1158. static void mtpg_complete_out(struct usb_ep *ep, struct usb_request *req)
  1159. {
  1160. struct mtpg_dev *dev = the_mtpg;
  1161.  
  1162. DEBUG_MTPB("[%s]\tline = [%d]req->status is = %d\n",
  1163. __func__, __LINE__, req->status);
  1164. if (req->status != 0) {
  1165. dev->error = 1;
  1166.  
  1167. DEBUG_MTPB("[%s]\t%d dev->error is=%d for rx_idle\n",
  1168. __func__, __LINE__, dev->error);
  1169. mtpg_req_put(dev, &dev->rx_idle, req);
  1170. } else {
  1171. DEBUG_MTPB("[%s]\t%d for rx_done\n", __func__, __LINE__);
  1172. mtpg_req_put(dev, &dev->rx_done, req);
  1173. }
  1174. wake_up(&dev->read_wq);
  1175. }
  1176.  
  1177. static void mtpg_complete_intr(struct usb_ep *ep, struct usb_request *req)
  1178. {
  1179. struct mtpg_dev *dev = the_mtpg;
  1180. /*printk(KERN_INFO "[%s]\tline = [%d]\n", __func__, __LINE__);*/
  1181.  
  1182. if (req->status != 0)
  1183. dev->error = 1;
  1184.  
  1185. mtpg_req_put(dev, &dev->intr_idle, req);
  1186.  
  1187. wake_up(&dev->intr_wq);
  1188. }
  1189.  
  1190. static void
  1191. mtpg_function_unbind(struct usb_configuration *c, struct usb_function *f)
  1192. {
  1193. struct mtpg_dev *dev = mtpg_func_to_dev(f);
  1194. struct usb_request *req;
  1195.  
  1196. printk(KERN_DEBUG "[%s]\tline = [%d]\n", __func__, __LINE__);
  1197.  
  1198. while ((req = mtpg_req_get(dev, &dev->rx_idle)))
  1199. mtpg_request_free(req, dev->bulk_out);
  1200.  
  1201. while ((req = mtpg_req_get(dev, &dev->tx_idle)))
  1202. mtpg_request_free(req, dev->bulk_in);
  1203.  
  1204. while ((req = mtpg_req_get(dev, &dev->intr_idle)))
  1205. mtpg_request_free(req, dev->int_in);
  1206. }
  1207.  
  1208. static int
  1209. mtpg_function_bind(struct usb_configuration *c, struct usb_function *f)
  1210. {
  1211. struct usb_composite_dev *cdev = c->cdev;
  1212. struct mtpg_dev *mtpg = mtpg_func_to_dev(f);
  1213. struct usb_request *req;
  1214. struct usb_ep *ep;
  1215. int i, id;
  1216.  
  1217. /* Allocate string descriptor numbers ... note that string
  1218. * contents can be overridden by the composite_dev glue.
  1219. */
  1220.  
  1221. printk(KERN_DEBUG "[%s]\tline = [%d]\n", __func__, __LINE__);
  1222.  
  1223. id = usb_interface_id(c, f);
  1224. if (id < 0) {
  1225. printk(KERN_ERR "[%s]Error in usb_interface_id\n", __func__);
  1226. return id;
  1227. }
  1228.  
  1229. mtpg_interface_desc.bInterfaceNumber = id;
  1230.  
  1231. ep = usb_ep_autoconfig(cdev->gadget, &fs_mtpg_in_desc);
  1232. if (!ep) {
  1233. printk(KERN_ERR "[%s]Error usb_ep_autoconfig IN\n", __func__);
  1234. goto autoconf_fail;
  1235. }
  1236. ep->driver_data = mtpg; /* claim the endpoint */
  1237. mtpg->bulk_in = ep;
  1238. the_mtpg->bulk_in = ep;
  1239.  
  1240. ep = usb_ep_autoconfig(cdev->gadget, &fs_mtpg_out_desc);
  1241. if (!ep) {
  1242. printk(KERN_ERR "[%s]Error usb_ep_autoconfig OUT\n", __func__);
  1243. goto autoconf_fail;
  1244. }
  1245. ep->driver_data = mtpg; /* claim the endpoint */
  1246. mtpg->bulk_out = ep;
  1247. the_mtpg->bulk_out = ep;
  1248.  
  1249. /* Interrupt Support for MTP */
  1250. ep = usb_ep_autoconfig(cdev->gadget, &int_fs_notify_desc);
  1251. if (!ep) {
  1252. printk(KERN_ERR "[%s]Error usb_ep_autoconfig INT\n", __func__);
  1253. goto autoconf_fail;
  1254. }
  1255. ep->driver_data = mtpg;
  1256. mtpg->int_in = ep;
  1257. the_mtpg->int_in = ep;
  1258.  
  1259. for (i = 0; i < MTPG_INTR_REQ_MAX; i++) {
  1260. req = mtpg_request_new(mtpg->int_in, MTPG_INTR_BUFFER_SIZE);
  1261. if (!req)
  1262. goto out;
  1263. req->complete = mtpg_complete_intr;
  1264. mtpg_req_put(mtpg, &mtpg->intr_idle, req);
  1265. }
  1266. for (i = 0; i < MTPG_RX_REQ_MAX; i++) {
  1267. req = mtpg_request_new(mtpg->bulk_out, MTPG_BULK_BUFFER_SIZE);
  1268. if (!req)
  1269. goto out;
  1270. req->complete = mtpg_complete_out;
  1271. mtpg_req_put(mtpg, &mtpg->rx_idle, req);
  1272. }
  1273.  
  1274. for (i = 0; i < MTPG_TX_REQ_MAX; i++) {
  1275. req = mtpg_request_new(mtpg->bulk_in, MTPG_BULK_BUFFER_SIZE);
  1276. if (!req)
  1277. goto out;
  1278. req->complete = mtpg_complete_in;
  1279. mtpg_req_put(mtpg, &mtpg->tx_idle, req);
  1280. }
  1281.  
  1282. if (gadget_is_dualspeed(cdev->gadget)) {
  1283.  
  1284. DEBUG_MTPB("[%s]\tdual speed line = [%d]\n",
  1285. __func__, __LINE__);
  1286.  
  1287. /* Assume endpoint addresses are the same for both speeds */
  1288. hs_mtpg_in_desc.bEndpointAddress =
  1289. fs_mtpg_in_desc.bEndpointAddress;
  1290. hs_mtpg_out_desc.bEndpointAddress =
  1291. fs_mtpg_out_desc.bEndpointAddress;
  1292. int_hs_notify_desc.bEndpointAddress =
  1293. int_fs_notify_desc.bEndpointAddress;
  1294. }
  1295.  
  1296. mtpg->cdev = cdev;
  1297. the_mtpg->cdev = cdev;
  1298.  
  1299. return 0;
  1300.  
  1301. autoconf_fail:
  1302. printk(KERN_ERR "mtpg unable to autoconfigure all endpoints\n");
  1303. return -ENOTSUPP;
  1304. out:
  1305. mtpg_function_unbind(c, f);
  1306. return -1;
  1307. }
  1308.  
  1309. static int mtpg_function_set_alt(struct usb_function *f,
  1310. unsigned intf, unsigned alt)
  1311. {
  1312. struct mtpg_dev *dev = mtpg_func_to_dev(f);
  1313. struct usb_composite_dev *cdev = f->config->cdev;
  1314. int ret;
  1315.  
  1316. if (dev->int_in->driver_data)
  1317. usb_ep_disable(dev->int_in);
  1318.  
  1319. ret = usb_ep_enable(dev->int_in,
  1320. ep_choose(cdev->gadget, &int_hs_notify_desc,
  1321. &int_fs_notify_desc));
  1322. if (ret) {
  1323. usb_ep_disable(dev->int_in);
  1324. dev->int_in->driver_data = NULL;
  1325. printk(KERN_ERR "[%s]Error in enabling INT EP\n", __func__);
  1326. return ret;
  1327. }
  1328. dev->int_in->driver_data = dev;
  1329.  
  1330. if (dev->bulk_in->driver_data)
  1331. usb_ep_disable(dev->bulk_in);
  1332.  
  1333. ret = usb_ep_enable(dev->bulk_in,
  1334. ep_choose(cdev->gadget, &hs_mtpg_in_desc,
  1335. &fs_mtpg_in_desc));
  1336. if (ret) {
  1337. usb_ep_disable(dev->bulk_in);
  1338. dev->bulk_in->driver_data = NULL;
  1339. printk(KERN_ERR "[%s] Enable Bulk-IN EP error%d\n",
  1340. __func__, __LINE__);
  1341. return ret;
  1342. }
  1343. dev->bulk_in->driver_data = dev;
  1344.  
  1345. if (dev->bulk_out->driver_data)
  1346. usb_ep_disable(dev->bulk_out);
  1347.  
  1348. ret = usb_ep_enable(dev->bulk_out,
  1349. ep_choose(cdev->gadget, &hs_mtpg_out_desc,
  1350. &fs_mtpg_out_desc));
  1351. if (ret) {
  1352. usb_ep_disable(dev->bulk_out);
  1353. dev->bulk_out->driver_data = NULL;
  1354. printk(KERN_ERR "[%s] Enable Bulk-Out EP error%d\n",
  1355. __func__, __LINE__);
  1356. return ret;
  1357. }
  1358. dev->bulk_out->driver_data = dev;
  1359.  
  1360. dev->online = 1;
  1361. dev->error = 0;
  1362. dev->read_ready = 1;
  1363. dev->cancel_io = 0;
  1364.  
  1365. /* readers may be blocked waiting for us to go online */
  1366. wake_up(&dev->read_wq);
  1367.  
  1368. return 0;
  1369. }
  1370.  
  1371. static void mtpg_function_disable(struct usb_function *f)
  1372. {
  1373. struct mtpg_dev *dev = mtpg_func_to_dev(f);
  1374.  
  1375. printk(KERN_DEBUG "[%s]\tline = [%d]\n", __func__, __LINE__);
  1376. dev->online = 0;
  1377. dev->error = 1;
  1378.  
  1379. usb_ep_disable(dev->int_in);
  1380. dev->int_in->driver_data = NULL;
  1381.  
  1382. usb_ep_disable(dev->bulk_in);
  1383. dev->bulk_in->driver_data = NULL;
  1384.  
  1385. usb_ep_disable(dev->bulk_out);
  1386. dev->bulk_out->driver_data = NULL;
  1387.  
  1388. wake_up(&dev->read_wq);
  1389. }
  1390.  
  1391.  
  1392. /*PIMA15740-2000 spec: Class specific setup request for MTP*/
  1393. static void
  1394. mtp_complete_cancel_io(struct usb_ep *ep, struct usb_request *req)
  1395. {
  1396. int i;
  1397. struct mtpg_dev *dev = ep->driver_data;
  1398.  
  1399. DEBUG_MTPB("[%s]\tline = [%d]\n", __func__, __LINE__);
  1400. if (req->status != 0) {
  1401. DEBUG_MTPB("[%s]req->status !=0\tline = [%d]\n",
  1402. __func__, __LINE__);
  1403. return;
  1404. }
  1405.  
  1406. if (req->actual != USB_PTPREQUEST_CANCELIO_SIZE) {
  1407. DEBUG_MTPB("[%s]USB_PTPREQUEST_CANCELIO_SIZE line = [%d]\n",
  1408. __func__, __LINE__);
  1409. usb_ep_set_halt(ep);
  1410.  
  1411. } else {
  1412. memset(dev->cancel_io_buf, 0, USB_PTPREQUEST_CANCELIO_SIZE+1);
  1413. memcpy(dev->cancel_io_buf, req->buf,
  1414. USB_PTPREQUEST_CANCELIO_SIZE);
  1415. dev->cancel_io = 1;
  1416. /*Debugging*/
  1417. for (i = 0; i < USB_PTPREQUEST_CANCELIO_SIZE; i++)
  1418. DEBUG_MTPB("[%s]cancel_io_buf[%d]=%x\tline = [%d]\n",
  1419. __func__, i, dev->cancel_io_buf[i], __LINE__);
  1420. mtp_send_signal(USB_PTPREQUEST_CANCELIO);
  1421. }
  1422.  
  1423. }
  1424.  
  1425. static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
  1426. const struct usb_ctrlrequest *ctrl)
  1427. {
  1428. struct mtpg_dev *dev = the_mtpg;
  1429. struct usb_request *req = cdev->req;
  1430. int signal_request = 0;
  1431. int value = -EOPNOTSUPP;
  1432. u16 w_index = le16_to_cpu(ctrl->wIndex);
  1433. u16 w_value = le16_to_cpu(ctrl->wValue);
  1434. u16 w_length = le16_to_cpu(ctrl->wLength);
  1435.  
  1436. if (ctrl->bRequestType ==
  1437. (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
  1438. && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
  1439. && (w_value >> 8) == USB_DT_STRING
  1440. && (w_value & 0xFF) == MTPG_OS_STRING_ID) {
  1441. value = (w_length < sizeof(mtpg_os_string)
  1442. ? w_length : sizeof(mtpg_os_string));
  1443. memcpy(cdev->req->buf, mtpg_os_string, value);
  1444. if (value >= 0) {
  1445. int rc;
  1446. cdev->req->zero = value < w_length;
  1447. cdev->req->length = value;
  1448.  
  1449. rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
  1450. if (rc < 0)
  1451. printk(KERN_DEBUG "[%s:%d] setup queue error\n",
  1452. __func__, __LINE__);
  1453. }
  1454. return value;
  1455. } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
  1456. if ((ctrl->bRequest == 1 || ctrl->bRequest == 0x54 ||
  1457. ctrl->bRequest == 0x6F || ctrl->bRequest == 0xFE)
  1458. && (ctrl->bRequestType & USB_DIR_IN)
  1459. && (w_index == 4 || w_index == 5)) {
  1460. value = (w_length < sizeof(mtpg_ext_config_desc) ?
  1461. w_length : sizeof(mtpg_ext_config_desc));
  1462. memcpy(cdev->req->buf, &mtpg_ext_config_desc, value);
  1463.  
  1464. if (value >= 0) {
  1465. int rc;
  1466. cdev->req->zero = value < w_length;
  1467. cdev->req->length = value;
  1468. rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
  1469. if (rc < 0)
  1470. printk(KERN_DEBUG "[%s:%d] setup queue error\n",
  1471. __func__, __LINE__);
  1472. }
  1473. return value;
  1474. }
  1475. printk(KERN_DEBUG "mtp_ctrlrequest "
  1476. "%02x.%02x v%04x i%04x l%u\n",
  1477. ctrl->bRequestType, ctrl->bRequest,
  1478. w_value, w_index, w_length);
  1479. }
  1480.  
  1481. switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
  1482. case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
  1483. | USB_PTPREQUEST_CANCELIO:
  1484. DEBUG_MTPB("[%s]\tline=[%d]w_v=%x, w_i=%x, w_l=%x\n",
  1485. __func__, __LINE__, w_value, w_index, w_length);
  1486. /* if (w_value == 0x00 && w_index ==
  1487. mtpg_interface_desc.bInterfaceNumber
  1488. && w_length == 0x06) */
  1489. if (w_value == 0x00 && w_length == 0x06) {
  1490. DEBUG_MTPB("[%s]PTPREQUESTCANCLIO line[%d]\n",
  1491. __func__, __LINE__);
  1492. value = w_length;
  1493. cdev->gadget->ep0->driver_data = dev;
  1494. req->complete = mtp_complete_cancel_io;
  1495. req->zero = 0;
  1496. req->length = value;
  1497. value = usb_ep_queue(cdev->gadget->ep0,
  1498. req, GFP_ATOMIC);
  1499. if (value < 0) {
  1500. printk(KERN_ERR "[%s:%d]Error usb_ep_queue\n",
  1501. __func__, __LINE__);
  1502. } else
  1503. DEBUG_MTPB("[%s] ep-queue-success line[%d]\n",
  1504. __func__, __LINE__);
  1505. }
  1506. return value;
  1507. break;
  1508.  
  1509. case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
  1510. | USB_PTPREQUEST_RESET:
  1511. DEBUG_MTPB("[%s] USB_PTPREQUEST_RESET\tline = [%d]\n",
  1512. __func__, __LINE__);
  1513. signal_request = USB_PTPREQUEST_RESET;
  1514. break;
  1515.  
  1516. case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
  1517. | USB_PTPREQUEST_GETSTATUS:
  1518. signal_request = USB_PTPREQUEST_GETSTATUS;
  1519. break;
  1520.  
  1521. case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
  1522. | USB_PTPREQUEST_GETEVENT:
  1523. signal_request = USB_PTPREQUEST_GETEVENT;
  1524. break;
  1525.  
  1526. default:
  1527. DEBUG_MTPB("[%s] INVALID REQUEST \tline = [%d]\n",
  1528. __func__, __LINE__);
  1529. return value;
  1530. }
  1531.  
  1532. value = mtp_send_signal(signal_request);
  1533. return value;
  1534. }
  1535.  
  1536. static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
  1537. {
  1538. struct mtpg_dev *mtpg = the_mtpg;
  1539. int status = 0;
  1540.  
  1541. if (strings_dev_mtp[F_MTP_IDX].id == 0) {
  1542. status = usb_string_id(c->cdev);
  1543.  
  1544. if (status < 0)
  1545. return status;
  1546.  
  1547. strings_dev_mtp[F_MTP_IDX].id = status;
  1548. mtpg_interface_desc.iInterface = status;
  1549. }
  1550.  
  1551. mtpg->cdev = c->cdev;
  1552. mtpg->function.name = mtpg_longname;
  1553. mtpg->function.strings = mtpg_dev_strings;
  1554.  
  1555. /*Test the switch */
  1556. if (ptp_config) {
  1557. mtpg->function.descriptors = fs_ptp_descs;
  1558. mtpg->function.hs_descriptors = hs_ptp_descs;
  1559. } else {
  1560. mtpg->function.descriptors = fs_mtpg_desc;
  1561. mtpg->function.hs_descriptors = hs_mtpg_desc;
  1562. }
  1563.  
  1564. mtpg->function.bind = mtpg_function_bind;
  1565. mtpg->function.unbind = mtpg_function_unbind;
  1566. mtpg->function.set_alt = mtpg_function_set_alt;
  1567. mtpg->function.disable = mtpg_function_disable;
  1568. #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
  1569. mtpg->function.set_config_desc = mtp_set_config_desc;
  1570. #endif
  1571.  
  1572. return usb_add_function(c, &mtpg->function);
  1573. }
  1574.  
  1575. static int mtp_setup(void)
  1576. {
  1577. struct mtpg_dev *mtpg;
  1578. int rc;
  1579.  
  1580. printk(KERN_DEBUG "[%s] \tline = [%d]\n", __func__, __LINE__);
  1581. mtpg = kzalloc(sizeof(*mtpg), GFP_KERNEL);
  1582. if (!mtpg) {
  1583. printk(KERN_ERR "mtpg_dev_alloc memory failed\n");
  1584. return -ENOMEM;
  1585. }
  1586.  
  1587. spin_lock_init(&mtpg->lock);
  1588. init_waitqueue_head(&mtpg->intr_wq);
  1589. init_waitqueue_head(&mtpg->read_wq);
  1590. init_waitqueue_head(&mtpg->write_wq);
  1591.  
  1592. atomic_set(&mtpg->open_excl, 0);
  1593. atomic_set(&mtpg->read_excl, 0);
  1594. atomic_set(&mtpg->write_excl, 0);
  1595. atomic_set(&mtpg->wintfd_excl, 0);
  1596.  
  1597. INIT_LIST_HEAD(&mtpg->rx_idle);
  1598. INIT_LIST_HEAD(&mtpg->rx_done);
  1599. INIT_LIST_HEAD(&mtpg->tx_idle);
  1600. INIT_LIST_HEAD(&mtpg->intr_idle);
  1601. mtpg->wq = create_singlethread_workqueue("mtp_read_send");
  1602. if (!mtpg->wq) {
  1603. printk(KERN_ERR "mtpg_dev_alloc work queue creation failed\n");
  1604. rc = -ENOMEM;
  1605. goto err_work;
  1606. }
  1607.  
  1608. INIT_WORK(&mtpg->read_send_work, read_send_work);
  1609.  
  1610. /* the_mtpg must be set before calling usb_gadget_register_driver */
  1611. the_mtpg = mtpg;
  1612.  
  1613. rc = misc_register(&mtpg_device);
  1614. if (rc != 0) {
  1615. printk(KERN_ERR " misc_register of mtpg Failed\n");
  1616. goto err_misc_register;
  1617. }
  1618.  
  1619. return 0;
  1620. err_work:
  1621. err_misc_register:
  1622. the_mtpg = NULL;
  1623. kfree(mtpg);
  1624. printk(KERN_ERR "mtp gadget driver failed to initialize\n");
  1625. return rc;
  1626. }
  1627.  
  1628. static void mtp_cleanup(void)
  1629. {
  1630. struct mtpg_dev *mtpg = the_mtpg;
  1631. printk(KERN_DEBUG "[%s:::%d]\n", __func__, __LINE__);
  1632.  
  1633. if (!mtpg)
  1634. return;
  1635.  
  1636. misc_deregister(&mtpg_device);
  1637. the_mtpg = NULL;
  1638. kfree(mtpg);
  1639. }
  1640.  
  1641. MODULE_AUTHOR("Deepak And Madhukar");
  1642. MODULE_LICENSE("GPL");