Advertisement
Guest User

Vmware patch linux kernel 3.17

a guest
Oct 8th, 2014
1,022
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Diff 36.89 KB | None | 0 0
  1. diff -ur a/vmblock-only/linux/control.c b/vmblock-only/linux/control.c
  2. --- a/vmblock-only/linux/control.c  2014-04-15 01:41:40.000000000 +0400
  3. +++ b/vmblock-only/linux/control.c  2014-10-09 05:21:34.094551409 +0400
  4. @@ -208,9 +208,11 @@
  5.     VMBlockSetProcEntryOwner(controlProcMountpoint);
  6.  
  7.     /* Create /proc/fs/vmblock/dev */
  8. -   controlProcEntry = create_proc_entry(VMBLOCK_CONTROL_DEVNAME,
  9. -                                        VMBLOCK_CONTROL_MODE,
  10. -                                        controlProcDirEntry);
  11. +   controlProcEntry = proc_create(VMBLOCK_CONTROL_DEVNAME,
  12. +                 VMBLOCK_CONTROL_MODE,
  13. +                                  controlProcDirEntry,
  14. +                 &ControlFileOps);
  15. +
  16.     if (!controlProcEntry) {
  17.        Warning("SetupProcDevice: could not create " VMBLOCK_DEVICE "\n");
  18.        remove_proc_entry(VMBLOCK_CONTROL_MOUNTPOINT, controlProcDirEntry);
  19. @@ -218,7 +220,10 @@
  20.        return -EINVAL;
  21.     }
  22.  
  23. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  24.     controlProcEntry->proc_fops = &ControlFileOps;
  25. +#endif
  26. +
  27.     return 0;
  28.  }
  29.  
  30. @@ -272,28 +277,65 @@
  31.   *----------------------------------------------------------------------------
  32.   */
  33.  
  34. +/* Simple version kernel's getname_flags() by pavlinux
  35. +*/
  36. +static char *fast_getname(const char __user *filename)
  37. +{
  38. +    long len;
  39. +    int ret = 0;
  40. +    char *tmp = __getname();
  41. +
  42. +    if (!tmp)
  43. +        return ERR_PTR(-ENOMEM);
  44. +
  45. +    len = strncpy_from_user(tmp, filename, PATH_MAX);
  46. +    if (len < 0)
  47. +        ret = len;           /* fault while copying from userspace */
  48. +    else if (len == 0)
  49. +        ret = -ENOENT;
  50. +    else if (len >= PATH_MAX)
  51. +        ret = -ENAMETOOLONG; /* truncated: buffer not NUL-terminated */
  52. +
  53. +    if (ret) {
  54. +        __putname(tmp);
  55. +        tmp = ERR_PTR(ret);
  56. +    }
  57. +    return tmp;
  58. +}
  58. +
  59.  static int
  60.  ExecuteBlockOp(const char __user *buf,                // IN: buffer with name
  61.                 const os_blocker_id_t blocker,         // IN: blocker ID (file)
  62.                 int (*blockOp)(const char *filename,   // IN: block operation
  63.                                const os_blocker_id_t blocker))
  64.  {
  65. -   char *name;
  66. +   char *name;
  67. +
  68.     int i;
  69.     int retval;
  70.  
  71. -   name = getname(buf);
  72. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  73. +   name = (char *)getname(buf);
  74. +#else
  75. +   name = (char *)fast_getname(buf);
  76. +#endif
  77.     if (IS_ERR(name)) {
  78.        return PTR_ERR(name);
  79.     }
  80.  
  81. +   /* Strip trailing slashes; an all-slash or empty name is rejected below. */
  82.     for (i = strlen(name) - 1; i >= 0 && name[i] == '/'; i--) {
  83.        name[i] = '\0';
  84.     }
  85.  
  86.     retval = i < 0 ? -EINVAL : blockOp(name, blocker);
  87.  
  88. -   putname(name);
  89. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  90. +   putname(name);
  91. +#else
  92. +   /* fast_getname() allocates the buffer with __getname(). */
  93. +   __putname(name);
  94. +#endif
  95. +
  96.  
  97.     return retval;
  98.  }
  99. diff -ur a/vmblock-only/linux/dentry.c b/vmblock-only/linux/dentry.c
  100. --- a/vmblock-only/linux/dentry.c   2014-04-15 01:41:40.000000000 +0400
  101. +++ b/vmblock-only/linux/dentry.c   2014-07-18 16:42:42.000000000 +0400
  102. @@ -32,7 +32,11 @@
  103.  #include "block.h"
  104.  
  105.  
  106. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  107.  static int DentryOpRevalidate(struct dentry *dentry, struct nameidata *nd);
  108. +#else
  109. +static int DentryOpRevalidate(struct dentry *dentry, unsigned int);
  110. +#endif
  111.  
  112.  struct dentry_operations LinkDentryOps = {
  113.     .d_revalidate = DentryOpRevalidate,
  114. @@ -58,9 +62,12 @@
  115.   *----------------------------------------------------------------------------
  116.   */
  117.  
  118. -static int
  119. -DentryOpRevalidate(struct dentry *dentry,  // IN: dentry revalidating
  120. -                   struct nameidata *nd)   // IN: lookup flags & intent
  121. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  122. +static int DentryOpRevalidate(struct dentry *dentry, struct nameidata *nd)
  123. +#else
  124. +static int DentryOpRevalidate(struct dentry *dentry, unsigned int flags)
  125. +#endif
  126. +
  127.  {
  128.     VMBlockInodeInfo *iinfo;
  129.     struct nameidata actualNd;
  130. @@ -101,7 +108,11 @@
  131.     if (actualDentry &&
  132.         actualDentry->d_op &&
  133.         actualDentry->d_op->d_revalidate) {
  134. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
  135. +      return actualDentry->d_op->d_revalidate(actualDentry, flags);
  136. +#else
  137.        return actualDentry->d_op->d_revalidate(actualDentry, nd);
  138. +#endif
  139.     }
  140.  
  141.     if (compat_path_lookup(iinfo->name, 0, &actualNd)) {
  142. diff -ur a/vmblock-only/linux/file.c b/vmblock-only/linux/file.c
  143. --- a/vmblock-only/linux/file.c 2014-04-15 01:41:40.000000000 +0400
  144. +++ b/vmblock-only/linux/file.c 2014-09-27 02:30:10.000000000 +0400
  145. @@ -63,6 +63,7 @@
  146.   *----------------------------------------------------------------------------
  147.   */
  148.  
  149. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  150.  static int
  151.  Filldir(void *buf,              // IN: Dirent buffer passed from FileOpReaddir
  152.          const char *name,       // IN: Dirent name
  153. @@ -76,7 +77,7 @@
  154.     /* Specify DT_LNK regardless */
  155.     return info->filldir(info->dirent, name, namelen, offset, ino, DT_LNK);
  156.  }
  157. -
  158. +#endif
  159.  
  160.  /* File operations */
  161.  
  162. @@ -164,6 +165,7 @@
  163.   *----------------------------------------------------------------------------
  164.   */
  165.  
  166. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  167.  static int
  168.  FileOpReaddir(struct file *file,  // IN
  169.                void *dirent,       // IN
  170. @@ -193,7 +195,7 @@
  171.  
  172.     return ret;
  173.  }
  174. -
  175. +#endif
  176.  
  177.  /*
  178.   *----------------------------------------------------------------------------
  179. @@ -235,9 +237,12 @@
  180.     return ret;
  181.  }
  182.  
  183. -
  184.  struct file_operations RootFileOps = {
  185. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  186.     .readdir = FileOpReaddir,
  187. +#endif
  188. +   .owner = THIS_MODULE,
  189. +   .llseek = no_llseek,
  190.     .open    = FileOpOpen,
  191.     .release = FileOpRelease,
  192.  };
  193. diff -ur a/vmblock-only/linux/inode.c b/vmblock-only/linux/inode.c
  194. --- a/vmblock-only/linux/inode.c    2014-04-15 01:41:40.000000000 +0400
  195. +++ b/vmblock-only/linux/inode.c    2014-09-27 02:41:45.000000000 +0400
  196. @@ -35,9 +35,15 @@
  197.  
  198.  
  199.  /* Inode operations */
  200. -static struct dentry *InodeOpLookup(struct inode *dir,
  201. -                                    struct dentry *dentry, struct nameidata *nd);
  202. +
  203. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  204. +static struct dentry *InodeOpLookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);
  205.  static int InodeOpReadlink(struct dentry *dentry, char __user *buffer, int buflen);
  206. +#else
  207. +static struct dentry *InodeOpLookup(struct inode *, struct dentry *, unsigned int);
  208. +static int InodeOpReadlink(struct dentry *, char __user *, int);
  209. +#endif
  210. +
  211.  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
  212.  static void *InodeOpFollowlink(struct dentry *dentry, struct nameidata *nd);
  213.  #else
  214. @@ -49,12 +55,15 @@
  215.     .lookup = InodeOpLookup,
  216.  };
  217.  
  218. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  219.  static struct inode_operations LinkInodeOps = {
  220. +#else
  221. +struct inode_operations LinkInodeOps = {
  222. +#endif
  223.     .readlink    = InodeOpReadlink,
  224.     .follow_link = InodeOpFollowlink,
  225.  };
  226.  
  227. -
  228.  /*
  229.   *----------------------------------------------------------------------------
  230.   *
  231. @@ -75,7 +84,11 @@
  232.  static struct dentry *
  233.  InodeOpLookup(struct inode *dir,      // IN: parent directory's inode
  234.                struct dentry *dentry,  // IN: dentry to lookup
  235. -              struct nameidata *nd)   // IN: lookup intent and information
  236. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  237. +         struct nameidata *nd)   // IN: lookup intent and information
  238. +#else
  239. +              unsigned int flags)
  240. +#endif
  241.  {
  242.     char *filename;
  243.     struct inode *inode;
  244. @@ -135,7 +148,12 @@
  245.     inode->i_size = INODE_TO_IINFO(inode)->nameLen;
  246.     inode->i_version = 1;
  247.     inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
  248. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  249.     inode->i_uid = inode->i_gid = 0;
  250. +#else
  251. +   inode->i_gid = make_kgid(current_user_ns(), 0);
  252. +   inode->i_uid = make_kuid(current_user_ns(), 0);
  253. +#endif
  254.     inode->i_op = &LinkInodeOps;
  255.  
  256.     d_add(dentry, inode);
  257. @@ -176,8 +194,11 @@
  258.     if (!iinfo) {
  259.        return -EINVAL;
  260.     }
  261. -
  262. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
  263. +   return readlink_copy(buffer, buflen, iinfo->name);
  264. +#else
  265.     return vfs_readlink(dentry, buffer, buflen, iinfo->name);
  266. +#endif
  267.  }
  268.  
  269.  
  270. @@ -221,7 +242,7 @@
  271.        goto out;
  272.     }
  273.  
  274. -   ret = vfs_follow_link(nd, iinfo->name);
  275. +   nd_set_link(nd, iinfo->name);
  276.  
  277.  out:
  278.  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
  279. @@ -230,3 +251,4 @@
  280.     return ret;
  281.  #endif
  282.  }
  283. +
  284. diff -ur a/vmblock-only/shared/vm_assert.h b/vmblock-only/shared/vm_assert.h
  285. --- a/vmblock-only/shared/vm_assert.h   2014-04-15 01:41:41.000000000 +0400
  286. +++ b/vmblock-only/shared/vm_assert.h   2014-03-24 13:59:49.000000000 +0400
  287. @@ -256,7 +256,8 @@
  288.                                         __FILE__, __LINE__, __FUNCTION__, \
  289.                                         _fix))
  290.  #else
  291. -   #define DEPRECATED(_fix) do {} while (0)
  292. +    #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
  293. +    #define DEPRECATED(_fix) do {} while (0)
  294.  #endif
  295.  
  296.  
  297. diff -ur a/vmci-only/linux/driver.c b/vmci-only/linux/driver.c
  298. --- a/vmci-only/linux/driver.c  2014-04-15 01:41:40.000000000 +0400
  299. +++ b/vmci-only/linux/driver.c  2014-07-18 16:58:40.000000000 +0400
  300. @@ -737,7 +737,11 @@
  301.           goto init_release;
  302.        }
  303.  
  304. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  305.        user = current_uid();
  306. +#else
  307. +      user = from_kuid(&init_user_ns, current_uid());
  308. +#endif
  309.        retval = VMCIContext_InitContext(initBlock.cid, initBlock.flags,
  310.                                         0 /* Unused */, vmciLinux->userVersion,
  311.                                         &user, &vmciLinux->context);
  312. diff -ur a/vmci-only/shared/vm_assert.h b/vmci-only/shared/vm_assert.h
  313. --- a/vmci-only/shared/vm_assert.h  2014-04-15 01:41:41.000000000 +0400
  314. +++ b/vmci-only/shared/vm_assert.h  2014-03-24 13:59:14.000000000 +0400
  315. @@ -256,7 +256,8 @@
  316.                                         __FILE__, __LINE__, __FUNCTION__, \
  317.                                         _fix))
  318.  #else
  319. -   #define DEPRECATED(_fix) do {} while (0)
  320. +    #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
  321. +    #define DEPRECATED(_fix) do {} while (0)
  322.  #endif
  323.  
  324.  
  325. diff -ur a/vmmon-only/include/memDefaults.h b/vmmon-only/include/memDefaults.h
  326. --- a/vmmon-only/include/memDefaults.h  2014-04-15 04:06:20.000000000 +0400
  327. +++ b/vmmon-only/include/memDefaults.h  2013-10-18 23:11:54.000000000 +0400
  328. @@ -34,62 +34,66 @@
  329.  
  330.  
  331.  /*
  332. - *-----------------------------------------------------------------------------
  333. + *----------------------------------------------------------------------
  334.   *
  335.   * MemDefaults_CalcMaxLockedPages --
  336.   *
  337. - *      Calculate the rough estimate of the maximum amount of memory
  338. - *      that can be locked (total for the kernel, all VMs, and other apps),
  339. - *      based on the size of host memory as supplied in pages.
  340. + *    Calculate the rough estimate of the maximum amount of memory
  341. + *    that can be locked based on the size of host memory as supplied
  342. + *    in Pages.
  343.   *
  344.   * Results:
  345. - *      The estimated maximum memory that can be locked in pages.
  346. + *    The estimated maximum memory that can be locked in Pages.
  347.   *
  348.   * Side effects:
  349. - *      None
  350. + *    None
  351.   *
  352. - *-----------------------------------------------------------------------------
  353. + *----------------------------------------------------------------------
  354.   */
  355.  
  356.  static INLINE unsigned
  357.  MemDefaults_CalcMaxLockedPages(unsigned hostPages)  // IN:
  358.  {
  359. -   unsigned reservedPages;
  360. +   APPLE_ONLY(unsigned reservedPages;)
  361.  
  362. +   /*
  363. +    * Once the amount of host memory crosses the lower bound give up.
  364. +    */
  365. +   if (hostPages < MEMDEFAULTS_MIN_HOST_PAGES) {
  366. +      return 0;
  367. +   }
  368.  #if defined(__APPLE__)
  369.     /*
  370. -    * Reserve (25% of the host memory + 512 MB) or 4 GB, whichever is lower.
  371. -    * 4 GB hosts perform poorly with less than 1.5 GB reserved, and large
  372. -    * memory hosts (>= 16 GB) may want to use more than 75% for VMs.
  373. +    * Reserve 20% of host memory + 820 MB or 4GB, whichever is lower,
  374. +    * for Mac OS and other apps.
  375.      */
  376. -   reservedPages = MIN((hostPages / 4) + MBYTES_2_PAGES(512),
  377. -                       GBYTES_2_PAGES(4));
  378. +   reservedPages = MIN(GBYTES_2_PAGES(4),
  379. +                       RatioOf(hostPages, 2, 10) + MBYTES_2_PAGES(820));
  380. +   return hostPages > reservedPages ? hostPages - reservedPages : 0;
  381.  #elif defined(_WIN32)
  382. -   reservedPages = MAX(hostPages / 4, MEMDEFAULTS_MIN_HOST_PAGES);
  383. +   return hostPages - MAX(hostPages / 4, MEMDEFAULTS_MIN_HOST_PAGES);
  384.  #else  // Linux
  385. -   reservedPages = MAX(hostPages / 8, MEMDEFAULTS_MIN_HOST_PAGES);
  386. +   return hostPages - MAX(hostPages / 8, MEMDEFAULTS_MIN_HOST_PAGES);
  387.  #endif
  388. -
  389. -   return hostPages > reservedPages ? hostPages - reservedPages : 0;
  390.  }
  391.  
  392.  
  393.  /*
  394. - *-----------------------------------------------------------------------------
  395. + *----------------------------------------------------------------------
  396.   *
  397.   * MemDefaults_CalcMaxLockedMBs --
  398.   *
  399. - *      Calculate the rough estimate of the maximum amount of memory
  400. - *      that can be locked based on the size of host memory as supplied
  401. - *      in MBytes.
  402. + *    Calculate the rough estimate of the maximum amount of memory
  403. + *    that can be locked based on the size of host memory as supplied
  404. + *    in MBytes.
  405.   *
  406.   * Results:
  407. - *      The estimated maximum memory that can be locked in MBytes.
  408. + *    The estimated maximum memory that can be locked in MBytes.
  409.   *
  410.   * Side effects:
  411. - *      None
  412. + *    None
  413.   *
  414. - *-----------------------------------------------------------------------------
  415. + *----------------------------------------------------------------------
  416.   */
  417.  
  418.  static INLINE uint32
  419. @@ -101,22 +105,22 @@
  420.  
  421.  
  422.  /*
  423. - *-----------------------------------------------------------------------------
  424. + *----------------------------------------------------------------------
  425.   *
  426.   * MemDefaults_CalcMinReservedMBs --
  427.   *
  428. - *      Provide a lower bound on the user as to the minimum amount
  429. - *      of memory to lock based on the size of host memory. This
  430. - *      threshold might be crossed as a result of the user limiting
  431. - *      the amount of memory consumed by all VMs.
  432. + *    Provide a lower bound on the user as to the minimum amount
  433. + *    of memory to lock based on the size of host memory. This
  434. + *    threshold might be crossed as a result of the user limiting
  435. + *    the amount of memory consumed by all VMs.
  436.   *
  437.   * Results:
  438. - *      The minimum locked memory requirement in MBytes.
  439. + *    The minimum locked memory requirement in MBytes.
  440.   *
  441.   * Side effects:
  442. - *      None
  443. + *    None
  444.   *
  445. - *-----------------------------------------------------------------------------
  446. + *----------------------------------------------------------------------
  447.   */
  448.  
  449.  static INLINE uint32
  450. diff -ur a/vmmon-only/include/vm_assert.h b/vmmon-only/include/vm_assert.h
  451. --- a/vmmon-only/include/vm_assert.h    2014-04-15 04:06:20.000000000 +0400
  452. +++ b/vmmon-only/include/vm_assert.h    2014-03-24 14:00:03.000000000 +0400
  453. @@ -256,7 +256,8 @@
  454.                                         __FILE__, __LINE__, __FUNCTION__, \
  455.                                         _fix))
  456.  #else
  457. -   #define DEPRECATED(_fix) do {} while (0)
  458. +    #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
  459. +    #define DEPRECATED(_fix) do {} while (0)
  460.  #endif
  461.  
  462.  
  463. diff -ur a/vmmon-only/linux/driver.c b/vmmon-only/linux/driver.c
  464. --- a/vmmon-only/linux/driver.c 2014-04-15 04:06:21.000000000 +0400
  465. +++ b/vmmon-only/linux/driver.c 2014-03-24 13:48:23.000000000 +0400
  466. @@ -1338,7 +1338,9 @@
  467.   *-----------------------------------------------------------------------------
  468.   */
  469.  
  470. -__attribute__((always_inline)) static Bool
  471. +#include <linux/compiler-gcc.h>
  472. +
  473. +__always_inline static Bool
  474.  LinuxDriverSyncReadTSCs(uint64 *delta) // OUT: TSC max - TSC min
  475.  {
  476.     TSCDelta tscDelta;
  477. @@ -1348,7 +1350,7 @@
  478.     /* Take the global lock to block concurrent calls. */
  479.     HostIF_GlobalLock(14);
  480.  
  481. -   /* Loop to warm up the cache. */
  482. +   /* Loop to warm up the cache. */
  483.     for (i = 0; i < 3; i++) {
  484.        Atomic_Write64(&tscDelta.min, ~CONST64U(0));
  485.        Atomic_Write64(&tscDelta.max, CONST64U(0));
  486. diff -ur a/vmmon-only/linux/vmmonInt.h b/vmmon-only/linux/vmmonInt.h
  487. --- a/vmmon-only/linux/vmmonInt.h   2014-04-15 04:06:21.000000000 +0400
  488. +++ b/vmmon-only/linux/vmmonInt.h   2013-10-28 02:32:10.000000000 +0400
  489. @@ -31,7 +31,7 @@
  490.  #ifdef VMW_HAVE_SMP_CALL_3ARG
  491.  #define compat_smp_call_function(fn, info, wait) smp_call_function(fn, info, wait)
  492.  #else
  493. -#define compat_smp_call_function(fn, info, wait) smp_call_function(fn, info, 1, wait)
  494. +#define compat_smp_call_function(fn, info, wait) smp_call_function(fn, info, wait)
  495.  #endif
  496.  
  497.  /*
  498. diff -ur a/vmnet-only/driver.c b/vmnet-only/driver.c
  499. --- a/vmnet-only/driver.c   2014-04-15 04:06:22.000000000 +0400
  500. +++ b/vmnet-only/driver.c   2013-10-18 23:11:55.000000000 +0400
  501. @@ -176,7 +176,6 @@
  502.                      Bool connectNewToPeer,
  503.                      struct file *filp, VNetPort *jackPort,
  504.                      VNetPort *newPeerPort);
  505. -static void VNetKrefRelease(struct kref *kref);
  506.  
  507.  uint vnet_max_qlen = VNET_MAX_QLEN;
  508.  module_param(vnet_max_qlen, uint, 0);
  509. @@ -620,7 +619,7 @@
  510.  
  511.     hubJack = VNetHub_AllocVnet(hubNum);
  512.     if (!hubJack) {
  513. -      kref_put(&port->jack.kref, VNetKrefRelease);
  514. +      VNetFree(&port->jack);
  515.        return -EBUSY;
  516.     }
  517.  
  518. @@ -628,8 +627,8 @@
  519.     retval = VNetConnect(&port->jack, hubJack);
  520.     if (retval) {
  521.        mutex_unlock(&vnetStructureMutex);
  522. -      kref_put(&port->jack.kref, VNetKrefRelease);
  523. -      kref_put(&hubJack->kref, VNetKrefRelease);
  524. +      VNetFree(&port->jack);
  525. +      VNetFree(hubJack);
  526.        return retval;
  527.     }
  528.  
  529. @@ -682,8 +681,8 @@
  530.     VNetRemovePortFromList(port);
  531.     mutex_unlock(&vnetStructureMutex);
  532.  
  533. -   kref_put(&port->jack.kref, VNetKrefRelease);
  534. -   kref_put(&peer->kref, VNetKrefRelease);
  535. +   VNetFree(&port->jack);
  536. +   VNetFree(peer);
  537.  
  538.     return 0;
  539.  }
  540. @@ -1317,7 +1316,7 @@
  541.        mutex_unlock(&vnetStructureMutex);
  542.  
  543.        /* Free the new peer */
  544. -      kref_put(&newPeer->kref, VNetKrefRelease);
  545. +      VNetFree(newPeer);
  546.        if (retval2) {
  547.      // assert xxx redo this
  548.      LOG(1, (KERN_NOTICE "/dev/vmnet: cycle on connect failure\n"));
  549. @@ -1340,9 +1339,9 @@
  550.  
  551.     /* Connected to new peer, so dealloc the old peer */
  552.     if (connectNewToPeerOfJack) {
  553. -      kref_put(&jack->kref, VNetKrefRelease);
  554. +      VNetFree(jack);
  555.     } else {
  556. -      kref_put(&oldPeer->kref, VNetKrefRelease);
  557. +      VNetFree(oldPeer);
  558.     }
  559.  
  560.     return 0;
  561. @@ -1560,10 +1559,6 @@
  562.     write_lock_irqsave(&vnetPeerLock, flags);
  563.     jack1->peer = jack2;
  564.     jack2->peer = jack1;
  565. -   jack1->state = TRUE;
  566. -   jack2->state = TRUE;
  567. -   kref_init(&jack1->kref);
  568. -   kref_init(&jack2->kref);
  569.     write_unlock_irqrestore(&vnetPeerLock, flags);
  570.  
  571.     if (jack2->numPorts) {
  572. @@ -1607,8 +1602,8 @@
  573.        write_unlock_irqrestore(&vnetPeerLock, flags);
  574.        return NULL;
  575.     }
  576. -   jack->state = FALSE;
  577. -   peer->state = FALSE;
  578. +   jack->peer = NULL;
  579. +   peer->peer = NULL;
  580.     write_unlock_irqrestore(&vnetPeerLock, flags);
  581.  
  582.     if (peer->numPorts) {
  583. @@ -1707,33 +1702,6 @@
  584.  /*
  585.   *----------------------------------------------------------------------
  586.   *
  587. - * VNetKrefRelease --
  588. - *
  589. - *      Free the VNetJack if no reference.
  590. - *
  591. - * Results:
  592. - *      None.
  593. - *
  594. - * Side effects:
  595. - *      None.
  596. - *
  597. - *----------------------------------------------------------------------
  598. - */
  599. -
  600. -static void
  601. -VNetKrefRelease(struct kref *kref)
  602. -{
  603. -   struct VNetJack *jack = container_of(kref, struct VNetJack, kref);
  604. -
  605. -   jack->state = FALSE;
  606. -   jack->peer = NULL;
  607. -   VNetFree(jack);
  608. -}
  609. -
  610. -
  611. -/*
  612. - *----------------------------------------------------------------------
  613. - *
  614.   * VNetSend --
  615.   *
  616.   *      Send a packet through this jack. Note, the packet goes to the
  617. @@ -1749,23 +1717,16 @@
  618.   */
  619.  
  620.  void
  621. -VNetSend(VNetJack *jack, // IN: jack
  622. +VNetSend(const VNetJack *jack, // IN: jack
  623.           struct sk_buff *skb)  // IN: packet
  624.  {
  625. -   VNetJack *peer;
  626. -
  627.     read_lock(&vnetPeerLock);
  628.     if (jack && jack->peer && jack->peer->rcv) {
  629. -      peer = jack->peer;
  630. -      kref_get(&(peer->kref));
  631. -      read_unlock(&vnetPeerLock);
  632. -
  633. -      peer->rcv(peer, skb);
  634. -      kref_put(&(peer->kref), VNetKrefRelease);
  635. +      jack->peer->rcv(jack->peer, skb);
  636.     } else {
  637. -      read_unlock(&vnetPeerLock);
  638.        dev_kfree_skb(skb);
  639.     }
  640. +   read_unlock(&vnetPeerLock);
  641.  }
  642.  
  643.  
  644. diff -ur a/vmnet-only/filter.c b/vmnet-only/filter.c
  645. --- a/vmnet-only/filter.c   2014-04-15 04:06:22.000000000 +0400
  646. +++ b/vmnet-only/filter.c   2013-12-04 01:15:21.000000000 +0400
  647. @@ -27,6 +27,7 @@
  648.  #include "compat_module.h"
  649.  #include <linux/mutex.h>
  650.  #include <linux/netdevice.h>
  651. +#include <linux/version.h>
  652.  #if COMPAT_LINUX_VERSION_CHECK_LT(3, 2, 0)
  653.  #   include <linux/module.h>
  654.  #else
  655. @@ -203,10 +204,10 @@
  656.  #endif
  657.  
  658.  static unsigned int
  659. -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
  660. -VNetFilterHookFn(const struct nf_hook_ops *ops,        // IN:
  661. -#else
  662. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  663.  VNetFilterHookFn(unsigned int hooknum,                 // IN:
  664. +#else
  665. +VNetFilterHookFn(const struct nf_hook_ops *ops,        // IN:
  666.  #endif
  667.  #ifdef VMW_NFHOOK_USES_SKB
  668.                   struct sk_buff *skb,                  // IN:
  669. @@ -256,10 +257,11 @@
  670.  
  671.     /* When the host transmits, hooknum is VMW_NF_INET_POST_ROUTING. */
  672.     /* When the host receives, hooknum is VMW_NF_INET_LOCAL_IN. */
  673. -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
  674. -   transmit = (ops->hooknum == VMW_NF_INET_POST_ROUTING);
  675. +
  676. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  677. +    transmit = (hooknum == VMW_NF_INET_POST_ROUTING);
  678.  #else
  679. -   transmit = (hooknum == VMW_NF_INET_POST_ROUTING);
  680. +    transmit = (ops->hooknum == VMW_NF_INET_POST_ROUTING);
  681.  #endif
  682.  
  683.     packetHeader = compat_skb_network_header(skb);
  684. diff -ur a/vmnet-only/hub.c b/vmnet-only/hub.c
  685. --- a/vmnet-only/hub.c  2014-04-15 04:06:22.000000000 +0400
  686. +++ b/vmnet-only/hub.c  2013-10-18 23:11:55.000000000 +0400
  687. @@ -129,7 +129,7 @@
  688.  {
  689.     VNetHub *currHub = vnetHub;
  690.     while (currHub && (currHub->hubType != HUB_TYPE_PVN ||
  691. -             memcmp(idNum, currHub->id.pvnID, sizeof currHub->id.pvnID))) {
  692. +             memcmp(idNum, currHub->id.pvnID, sizeof currHub->id.pvnID))) { /* array size, not sizeof pointer */
  693.        currHub = currHub->next;
  694.     }
  695.     return currHub;
  696. @@ -312,7 +312,7 @@
  697.  
  698.        if (allocPvn) {
  699.      hub->hubType = HUB_TYPE_PVN;
  700. -    memcpy(hub->id.pvnID, id, sizeof hub->id.pvnID);
  701. +    memcpy(hub->id.pvnID, id, sizeof hub->id.pvnID); /* dest array size, not sizeof pointer */
  702.      ++pvnInstance;
  703.        } else {
  704.      hub->hubType = HUB_TYPE_VNET;
  705. @@ -536,8 +536,6 @@
  706.        jack = &hub->jack[i];
  707.        if (jack->private &&   /* allocated */
  708.            jack->peer &&      /* and connected */
  709. -          jack->state &&     /* and enabled */
  710. -          jack->peer->state && /* and enabled */
  711.            jack->peer->rcv && /* and has a receiver */
  712.            (jack != this)) {  /* and not a loop */
  713.           clone = skb_clone(skb, GFP_ATOMIC);
  714. @@ -582,7 +580,7 @@
  715.     hub->myGeneration = generation;
  716.  
  717.     for (i = 0; i < NUM_JACKS_PER_HUB; i++) {
  718. -      if (hub->jack[i].private && hub->jack[i].state && (i != this->index)) {
  719. +      if (hub->jack[i].private && (i != this->index)) {
  720.           foundCycle = VNetCycleDetect(hub->jack[i].peer, generation);
  721.           if (foundCycle) {
  722.              return TRUE;
  723. @@ -638,8 +636,7 @@
  724.              }
  725.           } else {
  726.              hub->jack[i].numPorts = new;
  727. -            if (hub->jack[i].state)
  728. -               VNetPortsChanged(hub->jack[i].peer);
  729. +            VNetPortsChanged(hub->jack[i].peer);
  730.           }
  731.        }
  732.     }
  733. diff -ur a/vmnet-only/netif.c b/vmnet-only/netif.c
  734. --- a/vmnet-only/netif.c    2014-04-15 04:06:21.000000000 +0400
  735. +++ b/vmnet-only/netif.c    2014-09-27 01:33:08.000000000 +0400
  736. @@ -149,7 +149,11 @@
  737.     memcpy(deviceName, devName, sizeof deviceName);
  738.     NULL_TERMINATE_STRING(deviceName);
  739.  
  740. -   dev = alloc_netdev(sizeof *netIf, deviceName, VNetNetIfSetup);
  741. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
  742. +   dev = alloc_netdev(sizeof *netIf, deviceName, NET_NAME_UNKNOWN, VNetNetIfSetup);
  743. +#else
  744. +   dev = alloc_netdev(sizeof *netIf, deviceName, VNetNetIfSetup);
  745. +#endif
  746.     if (!dev) {
  747.        retval = -ENOMEM;
  748.        goto out;
  749. @@ -221,7 +225,7 @@
  750.        LOG(0, (KERN_NOTICE "%s: could not register network device\n",
  751.            dev->name));
  752.        retval = -ENODEV;
  753. -      goto outRemoveProc;
  754. +      goto outFreeDev;
  755.     }
  756.  
  757.     *ret = &netIf->port;
  758. diff -ur a/vmnet-only/userif.c b/vmnet-only/userif.c
  759. --- a/vmnet-only/userif.c   2014-04-15 04:06:22.000000000 +0400
  760. +++ b/vmnet-only/userif.c   2013-10-18 23:11:55.000000000 +0400
  761. @@ -62,7 +62,7 @@
  762.  typedef struct VNetUserIF {
  763.     VNetPort               port;
  764.     struct sk_buff_head    packetQueue;
  765. -   Atomic_uint32         *pollPtr;
  766. +   uint32*                pollPtr;
  767.     MonitorActionIntr     *actionIntr;
  768.     uint32                 pollMask;
  769.     MonitorIdemAction      actionID;
  770. @@ -194,14 +194,6 @@
  771.  VNetUserIfSetupNotify(VNetUserIF *userIf, // IN
  772.                        VNet_Notify *vn)    // IN
  773.  {
  774. -   unsigned long flags;
  775. -   struct sk_buff_head *q = &userIf->packetQueue;
  776. -   uint32 *pollPtr;
  777. -   MonitorActionIntr *actionIntr;
  778. -   uint32 *recvClusterCount;
  779. -   struct page *pollPage = NULL;
  780. -   struct page *actPage = NULL;
  781. -   struct page *recvClusterPage = NULL;
  782.     int retval;
  783.  
  784.     if (userIf->pollPtr || userIf->actionIntr || userIf->recvClusterCount) {
  785. @@ -209,63 +201,28 @@
  786.        return -EBUSY;
  787.     }
  788.  
  789. -   if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &pollPage,
  790. -                                        &pollPtr)) < 0) {
  791. +   if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &userIf->pollPage,
  792. +                                        &userIf->pollPtr)) < 0) {
  793.        return retval;
  794.     }
  795.  
  796. -   /* Atomic operations require proper alignment */
  797. -   if ((uintptr_t)pollPtr & (sizeof *pollPtr - 1)) {
  798. -      LOG(0, (KERN_DEBUG "vmnet: Incorrect notify alignment\n"));
  799. -      retval = -EFAULT;
  800. -      goto error_free;
  801. -   }
  802. -
  803. -   if ((retval = VNetUserIfMapPtr((VA)vn->actPtr, sizeof *actionIntr,
  804. -                                  &actPage,
  805. -                                  (void **)&actionIntr)) < 0) {
  806. -      goto error_free;
  807. -   }
  808. -
  809. -   if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr,
  810. -                                        &recvClusterPage,
  811. -                                        &recvClusterCount)) < 0) {
  812. -      goto error_free;
  813. +   if ((retval = VNetUserIfMapPtr((VA)vn->actPtr, sizeof *userIf->actionIntr,
  814. +                                  &userIf->actPage,
  815. +                                  (void **)&userIf->actionIntr)) < 0) {
  816. +      VNetUserIfUnsetupNotify(userIf);
  817. +      return retval;
  818.     }
  819.  
  820. -   spin_lock_irqsave(&q->lock, flags);
  821. -   if (userIf->pollPtr || userIf->actionIntr || userIf->recvClusterCount) {
  822. -      spin_unlock_irqrestore(&q->lock, flags);
  823. -      retval = -EBUSY;
  824. -      LOG(0, (KERN_DEBUG "vmnet: Notification mechanism already active\n"));
  825. -      goto error_free;
  826. +   if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr,
  827. +                                        &userIf->recvClusterPage,
  828. +                                        &userIf->recvClusterCount)) < 0) {
  829. +      VNetUserIfUnsetupNotify(userIf);
  830. +      return retval;
  831.     }
  832.  
  833. -   userIf->pollPtr = (Atomic_uint32 *)pollPtr;
  834. -   userIf->pollPage = pollPage;
  835. -   userIf->actionIntr = actionIntr;
  836. -   userIf->actPage = actPage;
  837. -   userIf->recvClusterCount = recvClusterCount;
  838. -   userIf->recvClusterPage = recvClusterPage;
  839.     userIf->pollMask = vn->pollMask;
  840.     userIf->actionID = vn->actionID;
  841. -   spin_unlock_irqrestore(&q->lock, flags);
  842.     return 0;
  843. -
  844. - error_free:
  845. -   if (pollPage) {
  846. -      kunmap(pollPage);
  847. -      put_page(pollPage);
  848. -   }
  849. -   if (actPage) {
  850. -      kunmap(actPage);
  851. -      put_page(actPage);
  852. -   }
  853. -   if (recvClusterPage) {
  854. -      kunmap(recvClusterPage);
  855. -      put_page(recvClusterPage);
  856. -   }
  857. -   return retval;
  858.  }
  859.  
  860.  /*
  861. @@ -288,14 +245,24 @@
  862.  static void
  863.  VNetUserIfUnsetupNotify(VNetUserIF *userIf) // IN
  864.  {
  865. -   unsigned long flags;
  866. -   struct page *pollPage = userIf->pollPage;
  867. -   struct page *actPage = userIf->actPage;
  868. -   struct page *recvClusterPage = userIf->recvClusterPage;
  869. -
  870. -   struct sk_buff_head *q = &userIf->packetQueue;
  871. -
  872. -   spin_lock_irqsave(&q->lock, flags);
  873. +   if (userIf->pollPage) {
  874. +      kunmap(userIf->pollPage);
  875. +      put_page(userIf->pollPage);
  876. +   } else {
  877. +      LOG(0, (KERN_DEBUG "vmnet: pollPtr was already deactivated\n"));
  878. +   }
  879. +   if (userIf->actPage) {
  880. +      kunmap(userIf->actPage);
  881. +      put_page(userIf->actPage);
  882. +   } else {
  883. +      LOG(0, (KERN_DEBUG "vmnet: actPtr was already deactivated\n"));
  884. +   }
  885. +   if (userIf->recvClusterPage) {
  886. +      kunmap(userIf->recvClusterPage);
  887. +      put_page(userIf->recvClusterPage);
  888. +   } else {
  889. +      LOG(0, (KERN_DEBUG "vmnet: recvClusterPtr was already deactivated\n"));
  890. +   }
  891.     userIf->pollPtr = NULL;
  892.     userIf->pollPage = NULL;
  893.     userIf->actionIntr = NULL;
  894. @@ -304,21 +271,6 @@
  895.     userIf->recvClusterPage = NULL;
  896.     userIf->pollMask = 0;
  897.     userIf->actionID = -1;
  898. -   spin_unlock_irqrestore(&q->lock, flags);
  899. -
  900. -   /* Release */
  901. -   if (pollPage) {
  902. -      kunmap(pollPage);
  903. -      put_page(pollPage);
  904. -   }
  905. -   if (actPage) {
  906. -      kunmap(actPage);
  907. -      put_page(actPage);
  908. -   }
  909. -   if (recvClusterPage) {
  910. -      kunmap(recvClusterPage);
  911. -      put_page(recvClusterPage);
  912. -   }
  913.  }
  914.  
  915.  
  916. @@ -390,7 +342,6 @@
  917.  {
  918.     VNetUserIF *userIf = (VNetUserIF*)this->private;
  919.     uint8 *dest = SKB_2_DESTMAC(skb);
  920. -   unsigned long flags;
  921.    
  922.     if (!UP_AND_RUNNING(userIf->port.flags)) {
  923.        userIf->stats.droppedDown++;
  924. @@ -419,20 +370,13 @@
  925.  
  926.     userIf->stats.queued++;
  927.  
  928. -   spin_lock_irqsave(&userIf->packetQueue.lock, flags);
  929. -   /*
  930. -    * __skb_dequeue_tail does not take any locks so must be used with
  931. -    * appropriate locks held only.
  932. -    */
  933. -   __skb_queue_tail(&userIf->packetQueue, skb);
  934. +   skb_queue_tail(&userIf->packetQueue, skb);
  935.     if (userIf->pollPtr) {
  936. -      Atomic_Or(userIf->pollPtr, userIf->pollMask);
  937. +      *userIf->pollPtr |= userIf->pollMask;
  938.        if (skb_queue_len(&userIf->packetQueue) >= (*userIf->recvClusterCount)) {
  939.           MonitorAction_SetBits(userIf->actionIntr, userIf->actionID);
  940.        }
  941.     }
  942. -   spin_unlock_irqrestore(&userIf->packetQueue.lock, flags);
  943. -
  944.     wake_up(&userIf->waitQueue);
  945.     return;
  946.    
  947. @@ -698,7 +642,6 @@
  948.     VNetUserIF *userIf = (VNetUserIF*)port->jack.private;
  949.     struct sk_buff *skb;
  950.     int ret;
  951. -   unsigned long flags;
  952.     DECLARE_WAITQUEUE(wait, current);
  953.  
  954.     add_wait_queue(&userIf->waitQueue, &wait);
  955. @@ -711,20 +654,13 @@
  956.           break;
  957.        }
  958.        ret = -EAGAIN;
  959. +      skb = skb_dequeue(&userIf->packetQueue);
  960.  
  961. -      spin_lock_irqsave(&userIf->packetQueue.lock, flags);
  962. -      /*
  963. -       * __skb_dequeue does not take any locks so must be used with
  964. -       * appropriate locks held only.
  965. -       */
  966. -      skb = __skb_dequeue(&userIf->packetQueue);
  967.        if (userIf->pollPtr) {
  968. -         if (!skb) {
  969. -            /* List empty */
  970. -            Atomic_And(userIf->pollPtr, ~userIf->pollMask);
  971. +         if (skb_queue_empty(&userIf->packetQueue)) {
  972. +            *userIf->pollPtr &= ~userIf->pollMask;
  973.           }
  974.        }
  975. -      spin_unlock_irqrestore(&userIf->packetQueue.lock, flags);
  976.  
  977.        if (skb != NULL || filp->f_flags & O_NONBLOCK) {
  978.           break;
  979. @@ -902,24 +838,15 @@
  980.        
  981.        if (!UP_AND_RUNNING(userIf->port.flags)) {
  982.           struct sk_buff *skb;
  983. -         unsigned long flags;
  984. -         struct sk_buff_head *q = &userIf->packetQueue;
  985.          
  986. -         while ((skb = skb_dequeue(q)) != NULL) {
  987. +         while ((skb = skb_dequeue(&userIf->packetQueue)) != NULL) {
  988.              dev_kfree_skb(skb);
  989.           }
  990.          
  991. -         spin_lock_irqsave(&q->lock, flags);
  992.           if (userIf->pollPtr) {
  993. -            if (skb_queue_empty(q)) {
  994. -               /*
  995. -                * Clear the pending bit as no packets are pending at this
  996. -                * point.
  997. -                */
  998. -               Atomic_And(userIf->pollPtr, ~userIf->pollMask);
  999. -            }
  1000. +            /* Clear the pending bit as no packets are pending at this point. */
  1001. +            *userIf->pollPtr &= ~userIf->pollMask;
  1002.           }
  1003. -         spin_unlock_irqrestore(&q->lock, flags);
  1004.        }
  1005.        break;
  1006.     case SIOCINJECTLINKSTATE:
  1007. @@ -1005,7 +932,7 @@
  1008.     userIf = (VNetUserIF *)port->jack.private;
  1009.     hubJack = port->jack.peer;
  1010.  
  1011. -   if (port->jack.state == FALSE || hubJack == NULL) {
  1012. +   if (hubJack == NULL) {
  1013.        return -EINVAL;
  1014.     }
  1015.  
  1016. diff -ur a/vmnet-only/vm_assert.h b/vmnet-only/vm_assert.h
  1017. --- a/vmnet-only/vm_assert.h    2014-04-15 04:06:21.000000000 +0400
  1018. +++ b/vmnet-only/vm_assert.h    2014-03-24 14:00:31.000000000 +0400
  1019. @@ -256,7 +256,8 @@
  1020.                                         __FILE__, __LINE__, __FUNCTION__, \
  1021.                                         _fix))
  1022.  #else
  1023. -   #define DEPRECATED(_fix) do {} while (0)
  1024. +    #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
  1025. +    #define DEPRECATED(_fix) do {} while (0)
  1026.  #endif
  1027.  
  1028.  
  1029. diff -ur a/vmnet-only/vnetInt.h b/vmnet-only/vnetInt.h
  1030. --- a/vmnet-only/vnetInt.h  2014-04-15 04:06:22.000000000 +0400
  1031. +++ b/vmnet-only/vnetInt.h  2013-10-18 23:11:55.000000000 +0400
  1032. @@ -131,9 +131,7 @@
  1033.     void          *private;     // private field for containing object
  1034.     int            index;       // private field for containing object
  1035.     VNetProcEntry *procEntry;   // private field for containing object
  1036. -   Bool           state;       // TRUE for enabled
  1037. -   struct kref    kref;        // ref count
  1038. -
  1039. +
  1040.     void         (*free)(VNetJack *this);
  1041.     void         (*rcv)(VNetJack *this, struct sk_buff *skb);
  1042.     Bool         (*cycleDetect)(VNetJack *this, int generation);
  1043. @@ -185,7 +183,7 @@
  1044.  
  1045.  VNetJack *VNetDisconnect(VNetJack *jack);
  1046.  
  1047. -void VNetSend(VNetJack *jack, struct sk_buff *skb);
  1048. +void VNetSend(const VNetJack *jack, struct sk_buff *skb);
  1049.  
  1050.  int VNetProc_MakeEntry(char *name, int mode, void *data,
  1051.                         VNetProcReadFn *fn, VNetProcEntry **ret);
  1052. @@ -302,8 +300,7 @@
  1053.  static INLINE int
  1054.  VNetIsBridged(VNetJack *jack) // IN: jack
  1055.  {
  1056. -   if (jack && jack->state && jack->peer && jack->peer->state &&
  1057. -       jack->peer->isBridged) {
  1058. +   if (jack && jack->peer && jack->peer->isBridged) {
  1059.        return jack->peer->isBridged(jack->peer);
  1060.     }
  1061.  
  1062. @@ -355,7 +352,7 @@
  1063.  static INLINE int
  1064.  VNetGetAttachedPorts(VNetJack *jack) // IN: jack
  1065.  {
  1066. -   if (jack && jack->state && jack->peer && jack->peer->state) {
  1067. +   if (jack && jack->peer) {
  1068.        return jack->peer->numPorts;
  1069.     }
  1070.     return 0;
  1071. diff -ur a/vsock-only/linux/af_vsock.c b/vsock-only/linux/af_vsock.c
  1072. --- a/vsock-only/linux/af_vsock.c   2014-04-15 01:41:41.000000000 +0400
  1073. +++ b/vsock-only/linux/af_vsock.c   2014-07-18 17:02:10.000000000 +0400
  1074. @@ -2869,7 +2869,11 @@
  1075.        vsk->connectTimeout = psk->connectTimeout;
  1076.     } else {
  1077.        vsk->trusted = capable(CAP_NET_ADMIN);
  1078. +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
  1079.        vsk->owner = current_uid();
  1080. +#else
  1081. +      vsk->owner = from_kuid(&init_user_ns, current_uid());
  1082. +#endif
  1083.        vsk->queuePairSize = VSOCK_DEFAULT_QP_SIZE;
  1084.        vsk->queuePairMinSize = VSOCK_DEFAULT_QP_SIZE_MIN;
  1085.        vsk->queuePairMaxSize = VSOCK_DEFAULT_QP_SIZE_MAX;
  1086. diff -ur a/vsock-only/linux/notify.c b/vsock-only/linux/notify.c
  1087. --- a/vsock-only/linux/notify.c 2014-04-15 01:41:41.000000000 +0400
  1088. +++ b/vsock-only/linux/notify.c 2014-09-27 01:40:01.000000000 +0400
  1089. @@ -516,7 +516,11 @@
  1090.     PKT_FIELD(vsk, sentWaitingRead) = FALSE;
  1091.  #endif
  1092.  
  1093. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
  1094. +   sk->sk_data_ready(sk);
  1095. +#else
  1096.     sk->sk_data_ready(sk, 0);
  1097. +#endif
  1098.  }
  1099.  
  1100.  
  1101. diff -ur a/vsock-only/linux/notifyQState.c b/vsock-only/linux/notifyQState.c
  1102. --- a/vsock-only/linux/notifyQState.c   2014-04-15 01:41:41.000000000 +0400
  1103. +++ b/vsock-only/linux/notifyQState.c   2014-09-27 01:38:08.000000000 +0400
  1104. @@ -164,7 +164,11 @@
  1105.                       struct sockaddr_vm *dst,    // IN: unused
  1106.                       struct sockaddr_vm *src)    // IN: unused
  1107.  {
  1108. -   sk->sk_data_ready(sk, 0);
  1109. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
  1110. +   sk->sk_data_ready(sk);
  1111. +#else
  1112. +   sk->sk_data_ready(sk, 0);
  1113. +#endif
  1114.  }
  1115.  
  1116.  
  1117. @@ -566,7 +570,11 @@
  1118.        }
  1119.  
  1120.        /* See the comment in VSockVmciNotifyPktSendPostEnqueue */
  1121. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
  1122. +      sk->sk_data_ready(sk);
  1123. +#else
  1124.        sk->sk_data_ready(sk, 0);
  1125. +#endif
  1126.     }
  1127.  
  1128.     return err;
  1129. diff -ur a/vsock-only/shared/vm_assert.h b/vsock-only/shared/vm_assert.h
  1130. --- a/vsock-only/shared/vm_assert.h 2014-04-15 01:41:41.000000000 +0400
  1131. +++ b/vsock-only/shared/vm_assert.h 2014-03-24 14:00:58.000000000 +0400
  1132. @@ -256,7 +256,8 @@
  1133.                                         __FILE__, __LINE__, __FUNCTION__, \
  1134.                                         _fix))
  1135.  #else
  1136. -   #define DEPRECATED(_fix) do {} while (0)
  1137. +    #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
  1138. +    #define DEPRECATED(_fix) do {} while (0)
  1139.  #endif
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement