poll_map.exp_c_prog

  1. root@linaro-developer:~/systemtap-2.2.1/testsuite# cat .systemtap-root/cache/83/stap_83df0379e451bb1ec824cc2edc7d8b76_9733.c
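/*
 * This is the translator's cached C output for the testsuite script
 * ./systemtap.base/poll_map.stp.  A rough reconstruction of that script,
 * inferred from the probe handlers and the poll_map.stp source coordinates
 * embedded below (8:1, 9:2, 12:1, 16:2, ... 32:3), is sketched here;
 * variable names come from the generated locals (l_n, l_f, l_num_to_do,
 * s_called, s_num_polls) and the exact line layout is a guess:
 *
 *   global called, num_polls
 *
 *   probe kernel.function("vfs_*").call {
 *       called[execname(), probefunc()]++
 *   }
 *
 *   probe timer.ms(100) {
 *       print("\n\n")
 *       num_to_do = 10
 *       foreach ([n, f] in called-) {
 *           printf("%s called %s\t%d times\n", n, f, called[n, f])
 *           num_to_do--
 *           if (num_to_do <= 0)
 *               break
 *       }
 *       delete called
 *       num_polls++
 *       if (num_polls > 30)
 *           exit()
 *   }
 *
 *   probe end {
 *       if (num_polls <= 30)
 *           print("FAIL\n")
 *       else
 *           print("SUCCESS\n")
 *   }
 */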
  2.  
  3. #define STAP_MSG_RUNTIME_H_01 "myproc-unprivileged tapset function called without is_myproc checking for pid %d (euid %d)"
  4. #define STAP_MSG_LOC2C_01 "read fault [man error::fault] at 0x%p (%s)"
  5. #define STAP_MSG_LOC2C_02 "write fault [man error::fault] at 0x%p (%s)"
  6. #define STAP_MSG_LOC2C_03 "divide by zero in DWARF operand (%s)"
  7. #define STAP_VERSION(a, b) ( ((a) << 8) + (b) )
  8. #ifndef STAP_COMPAT_VERSION
  9. #define STAP_COMPAT_VERSION STAP_VERSION(2, 2)
  10. #endif
  11. #include "runtime_defines.h"
  12. #include "linux/perf_read.h"
  13. #define STP_PR_STAPUSR 0x2
  14. #define STP_PR_STAPSYS 0x4
  15. #define STP_PR_STAPDEV 0x8
  16. #define STP_PRIVILEGE 0x8
  17. int stp_required_privilege __attribute__ ((section (".stap_privilege"))) = STP_PRIVILEGE;
  18. #ifndef MAXNESTING
  19. #define MAXNESTING 2
  20. #endif
  21. #define STP_SKIP_BADVARS 0
  22. #define STP_PROBE_COUNT 43
  23. #include "runtime.h"
  24.  
  25. #include <linux/version.h>
  26. #include <linux/file.h>
  27. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
  28. #include <linux/fdtable.h>
  29. #endif
  30. #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
  31. #include <linux/sched/rt.h>
  32. #endif
  33. #ifndef STAPCONF_TASK_UID
  34. #include <linux/cred.h>
  35. #endif
  36.  
  37.  
  38.  
  39. #define __STP_GET_USER(t, warn)                     \
  40.     do {                                \
  41.         __label__ deref_fault;                  \
  42.         t *_ptr = (t*) (intptr_t) STAP_ARG_addr;        \
  43.         STAP_RETVALUE = uread (_ptr);               \
  44.         if (0) {                        \
  45.         deref_fault:                        \
  46.             STAP_RETVALUE = 0;              \
  47.             CONTEXT->last_error = NULL;         \
  48.             if (warn) {                 \
  49.                 snprintf(CONTEXT->error_buffer,     \
  50.                     sizeof(CONTEXT->error_buffer),  \
  51.                     "user %s copy fault %p", #t, _ptr); \
  52.                 _stp_warn(CONTEXT->error_buffer);   \
  53.             }                       \
  54.         }                           \
  55.     } while (0)
  56.  
  57. #define STP_GET_USER(t) __STP_GET_USER(t, 0)
  58. #define STP_GET_USER_WARN(t) __STP_GET_USER(t, 1)
  59.  
  60.  
  61.  
  62. #define STAP_NEED_CONTEXT_TOKENIZE 1
  63.  
  64.  
  65.  
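/*
 * Per-probe runtime context.  probe_locals is a union of the temporaries
 * each compiled probe body needs (probe_2025/2066/2067 below); locals[]
 * holds one frame of tapset-function locals per nesting level up to
 * MAXNESTING; printf_locals stages the arguments for the compiled printf
 * helper when the non-legacy print path is built.
 */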
  66. struct context {
  67.   #include "common_probe_context.h"
  68.   union {
  69.     struct probe_2025_locals {
  70.       string_t __tmp0;
  71.       string_t __tmp1;
  72.       int64_t __tmp3;
  73.       int64_t __tmp4;
  74.     } probe_2025;
  75.     struct probe_2066_locals {
  76.       int64_t l_num_to_do;
  77.       string_t l_n;
  78.       string_t l_f;
  79.       union {
  80.         struct {
  81.           struct map_node *__tmp4;
  82.           union {
  83.             struct {
  84.               string_t __tmp5;
  85.               string_t __tmp6;
  86.               int64_t __tmp7;
  87.             };
  88.             struct {
  89.               int64_t __tmp10;
  90.             };
  91.           };
  92.         };
  93.         struct {
  94.           int64_t __tmp12;
  95.         };
  96.       };
  97.     } probe_2066;
  98.     struct probe_2067_locals {
  99.     } probe_2067;
  100.   } probe_locals;
  101.   union {
  102.     struct function_addr_locals {
  103.       int64_t __retvalue;
  104.     } function_addr;
  105.     struct function_execname_locals {
  106.       string_t __retvalue;
  107.     } function_execname;
  108.     struct function_exit_locals {
  109.       /* no return value */
  110.     } function_exit;
  111.     struct function_probefunc_locals {
  112.       int64_t __tmp0;
  113.       int64_t __tmp1;
  114.       string_t __retvalue;
  115.     } function_probefunc;
  116.     struct function_symname_locals {
  117.       int64_t l_addr;
  118.       string_t __retvalue;
  119.     } function_symname;
  120.     struct function_uaddr_locals {
  121.       int64_t __retvalue;
  122.     } function_uaddr;
  123.     struct function_user_mode_locals {
  124.       int64_t __retvalue;
  125.     } function_user_mode;
  126.     struct function_usymname_locals {
  127.       int64_t l_addr;
  128.       string_t __retvalue;
  129.     } function_usymname;
  130.   } locals [MAXNESTING+1];
  131.   #if MAXNESTING < 0
  132.   #error "MAXNESTING must be positive"
  133.   #endif
  134.   #ifndef STP_LEGACY_PRINT
  135.   union {
  136.     struct stp_printf_2_locals {
  137.       const char* arg0;
  138.       const char* arg1;
  139.       int64_t arg2;
  140.     } stp_printf_2;
  141.   } printf_locals;
  142.   #endif // STP_LEGACY_PRINT
  143. };
  144.  
  145. #include "runtime_context.h"
  146. #include "alloc.c"
  147. #define VALUE_TYPE INT64
  148. #define KEY1_TYPE STRING
  149. #define KEY2_TYPE STRING
  150. #include "map-gen.c"
  151. #undef MAP_DO_PMAP
  152. #undef VALUE_TYPE
  153. #undef KEY1_TYPE
  154. #undef KEY2_TYPE
  155. #include "map.c"
  156. #ifndef STP_LEGACY_PRINT
  157.  
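/*
 * Compiled fast path for the script's printf("%s called %s\t%lld times\n", ...):
 * a first pass computes how many bytes the formatted record needs (clamped
 * to STP_BUFFER_SIZE), those bytes are reserved in the transport buffer,
 * and a second pass writes the strings and the number into place.
 */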
  158. static void stp_printf_2 (struct context* __restrict__ c) {
  159.   struct stp_printf_2_locals * __restrict__ l = & c->printf_locals.stp_printf_2;
  160.   char *str = NULL, *end = NULL;
  161.   const char *src;
  162.   int width;
  163.   int precision;
  164.   unsigned long ptr_value;
  165.   int num_bytes;
  166.   (void) width;
  167.   (void) precision;
  168.   (void) ptr_value;
  169.   (void) num_bytes;
  170.   num_bytes = 0;
  171.   width = -1;
  172.   precision = -1;
  173.   num_bytes += _stp_vsprint_memory_size(l->arg0, width, precision, 's', 0);
  174.   num_bytes += sizeof(" called ") - 1;
  175.   width = -1;
  176.   precision = -1;
  177.   num_bytes += _stp_vsprint_memory_size(l->arg1, width, precision, 's', 0);
  178.   num_bytes += sizeof("\t") - 1;
  179.   width = -1;
  180.   precision = -1;
  181.   num_bytes += number_size(l->arg2, 10, width, precision, 2);
  182.   num_bytes += sizeof(" times\n") - 1;
  183.   num_bytes = clamp(num_bytes, 0, STP_BUFFER_SIZE);
  184.   str = (char*)_stp_reserve_bytes(num_bytes);
  185.   end = str ? str + num_bytes - 1 : 0;
  186.   if (str && str <= end) {
  187.     width = -1;
  188.     precision = -1;
  189.     str = _stp_vsprint_memory(str, end, l->arg0, width, precision, 's', 0);
  190.     src = " called ";
  191.     while (*src && str <= end)
  192.       *str++ = *src++;
  193.     width = -1;
  194.     precision = -1;
  195.     str = _stp_vsprint_memory(str, end, l->arg1, width, precision, 's', 0);
  196.     src = "\t";
  197.     while (*src && str <= end)
  198.       *str++ = *src++;
  199.     width = -1;
  200.     precision = -1;
  201.     str = number(str, end, l->arg2, 10, width, precision, 2);
  202.     src = " times\n";
  203.     while (*src && str <= end)
  204.       *str++ = *src++;
  205.   }
  206. }
  207. #endif // STP_LEGACY_PRINT
  208.  
  209. struct stp_globals {
  210.   MAP s_called;
  211.   rwlock_t s_called_lock;
  212.   #ifdef STP_TIMING
  213.   atomic_t s_called_lock_skip_count;
  214.   #endif
  215.  
  216.   int64_t s_num_polls;
  217.   rwlock_t s_num_polls_lock;
  218.   #ifdef STP_TIMING
  219.   atomic_t s_num_polls_lock_skip_count;
  220.   #endif
  221.  
  222. };
  223.  
  224. static struct stp_globals stp_global = {
  225.  
  226. };
  227. #include "common_session_state.h"
  228. #include "probe_lock.h"
  229. #ifdef STAP_NEED_GETTIMEOFDAY
  230. #include "time.c"
  231. #endif
  232.  
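/*
 * Compiled tapset functions used by the script: addr, execname, exit,
 * probefunc, symname, uaddr, user_mode and usymname.  Each runs against
 * the shared context and its per-nesting locals slot.
 */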
  233. static void function_addr (struct context * __restrict__ c);
  234.  
  235. static void function_execname (struct context * __restrict__ c);
  236.  
  237. static void function_exit (struct context * __restrict__ c);
  238.  
  239. static void function_probefunc (struct context * __restrict__ c);
  240.  
  241. static void function_symname (struct context * __restrict__ c);
  242.  
  243. static void function_uaddr (struct context * __restrict__ c);
  244.  
  245. static void function_user_mode (struct context * __restrict__ c);
  246.  
  247. static void function_usymname (struct context * __restrict__ c);
  248.  
  249. static void function_addr (struct context* __restrict__ c) {
  250.   __label__ out;
  251.   struct function_addr_locals *  __restrict__ l = & c->locals[c->nesting+1].function_addr;
  252.   (void) l;
  253.   #define CONTEXT c
  254.   #define THIS l
  255.   #define STAP_RETVALUE THIS->__retvalue
  256.   c->last_stmt = "identifier 'addr' at /usr/local/share/systemtap/tapset/linux/context.stp:354:10";
  257.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  258.     c->last_error = "MAXNESTING exceeded";
  259.     return;
  260.   } else {
  261.     c->nesting ++;
  262.   }
  263.   l->__retvalue = 0;
  264.   #define return goto out
  265.   {
  266.      /* pure */
  267.   if (CONTEXT->user_mode_p) {
  268.     STAP_RETVALUE = (intptr_t)(CONTEXT->uregs ? REG_IP(CONTEXT->uregs) : 0);
  269.   } else {
  270.     STAP_RETVALUE = (intptr_t)(CONTEXT->kregs ? REG_IP(CONTEXT->kregs) : 0);
  271.   }
  272.  
  273.   }
  274.   #undef return
  275. out:
  276.   if (0) goto out;
  277.   c->nesting --;
  278.   #undef CONTEXT
  279.   #undef THIS
  280.   #undef STAP_RETVALUE
  281. }
  282.  
  283.  
  284. static void function_execname (struct context* __restrict__ c) {
  285.   __label__ out;
  286.   struct function_execname_locals *  __restrict__ l = & c->locals[c->nesting+1].function_execname;
  287.   (void) l;
  288.   #define CONTEXT c
  289.   #define THIS l
  290.   #define STAP_RETVALUE THIS->__retvalue
  291.   c->last_stmt = "identifier 'execname' at /usr/local/share/systemtap/tapset/linux/context.stp:17:10";
  292.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  293.     c->last_error = "MAXNESTING exceeded";
  294.     return;
  295.   } else {
  296.     c->nesting ++;
  297.   }
  298.   l->__retvalue[0] = '\0';
  299.   #define return goto out
  300.   {
  301.      /* pure */ /* unprivileged */
  302.     strlcpy (STAP_RETVALUE, current->comm, MAXSTRINGLEN);
  303.  
  304.   }
  305.   #undef return
  306. out:
  307.   if (0) goto out;
  308.   c->nesting --;
  309.   #undef CONTEXT
  310.   #undef THIS
  311.   #undef STAP_RETVALUE
  312. }
  313.  
  314.  
  315. static void function_exit (struct context* __restrict__ c) {
  316.   __label__ out;
  317.   struct function_exit_locals *  __restrict__ l = & c->locals[c->nesting+1].function_exit;
  318.   (void) l;
  319.   #define CONTEXT c
  320.   #define THIS l
  321.   c->last_stmt = "identifier 'exit' at /usr/local/share/systemtap/tapset/logging.stp:49:10";
  322.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  323.     c->last_error = "MAXNESTING exceeded";
  324.     return;
  325.   } else {
  326.     c->nesting ++;
  327.   }
  328.   #define return goto out
  329.   {
  330.      /* unprivileged */
  331.     atomic_set (session_state(), STAP_SESSION_STOPPING);
  332.     _stp_exit ();
  333.  
  334.   }
  335.   #undef return
  336. out:
  337.   if (0) goto out;
  338.   c->nesting --;
  339.   #undef CONTEXT
  340.   #undef THIS
  341.   #undef STAP_RETVALUE
  342. }
  343.  
  344.  
  345. static void function_probefunc (struct context* __restrict__ c) {
  346.   __label__ out;
  347.   struct function_probefunc_locals *  __restrict__ l = & c->locals[c->nesting+1].function_probefunc;
  348.   (void) l;
  349.   #define CONTEXT c
  350.   #define THIS l
  351.   #define STAP_RETVALUE THIS->__retvalue
  352.   c->last_stmt = "identifier 'probefunc' at /usr/local/share/systemtap/tapset/linux/context-symbols.stp:99:10";
  353.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  354.     c->last_error = "MAXNESTING exceeded";
  355.     return;
  356.   } else {
  357.     c->nesting ++;
  358.   }
  359.   l->__retvalue[0] = '\0';
  360.   #define return goto out
  361.   strlcpy (l->__retvalue, ((
  362.   ({
  363.     function_user_mode (c);
  364.     if (unlikely(c->last_error)) goto out;
  365.     c->locals[c->nesting+1].function_user_mode.__retvalue;
  366.   })) ? (
  367.   ({
  368.     l->__tmp0 =
  369.     ({
  370.       function_uaddr (c);
  371.       if (unlikely(c->last_error)) goto out;
  372.       c->locals[c->nesting+1].function_uaddr.__retvalue;
  373.     });
  374.     c->locals[c->nesting+1].function_usymname.l_addr = l->__tmp0;
  375.     function_usymname (c);
  376.     if (unlikely(c->last_error)) goto out;
  377.     c->locals[c->nesting+1].function_usymname.__retvalue;
  378.   })) : (
  379.   ({
  380.     l->__tmp1 =
  381.     ({
  382.       function_addr (c);
  383.       if (unlikely(c->last_error)) goto out;
  384.       c->locals[c->nesting+1].function_addr.__retvalue;
  385.     });
  386.     c->locals[c->nesting+1].function_symname.l_addr = l->__tmp1;
  387.     function_symname (c);
  388.     if (unlikely(c->last_error)) goto out;
  389.     c->locals[c->nesting+1].function_symname.__retvalue;
  390.   }))), MAXSTRINGLEN);
  391.   c->actionremaining -= 1;
  392.   if (unlikely (c->actionremaining <= 0)) {
  393.     c->last_error = "MAXACTION exceeded";
  394.     c->last_stmt = "keyword at /usr/local/share/systemtap/tapset/linux/context-symbols.stp:133:5";
  395.     goto out;
  396.   }
  397.   goto out;
  398.   #undef return
  399. out:
  400.   if (0) goto out;
  401.   c->nesting --;
  402.   #undef CONTEXT
  403.   #undef THIS
  404.   #undef STAP_RETVALUE
  405. }
  406.  
  407.  
  408. static void function_symname (struct context* __restrict__ c) {
  409.   __label__ out;
  410.   struct function_symname_locals *  __restrict__ l = & c->locals[c->nesting+1].function_symname;
  411.   (void) l;
  412.   #define CONTEXT c
  413.   #define THIS l
  414.   #define STAP_ARG_addr THIS->l_addr
  415.   #define STAP_RETVALUE THIS->__retvalue
  416.   c->last_stmt = "identifier 'symname' at /usr/local/share/systemtap/tapset/linux/context-symbols.stp:225:10";
  417.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  418.     c->last_error = "MAXNESTING exceeded";
  419.     return;
  420.   } else {
  421.     c->nesting ++;
  422.   }
  423.   l->__retvalue[0] = '\0';
  424.   #define return goto out
  425.   {
  426.      /* pure */ /* pragma:symbols */
  427.      _stp_snprint_addr(STAP_RETVALUE, MAXSTRINGLEN, STAP_ARG_addr,
  428.                _STP_SYM_SYMBOL, NULL);
  429.  
  430.   }
  431.   #undef return
  432. out:
  433.   if (0) goto out;
  434.   c->nesting --;
  435.   #undef CONTEXT
  436.   #undef THIS
  437.   #undef STAP_ARG_addr
  438.   #undef STAP_RETVALUE
  439. }
  440.  
  441.  
  442. static void function_uaddr (struct context* __restrict__ c) {
  443.   __label__ out;
  444.   struct function_uaddr_locals *  __restrict__ l = & c->locals[c->nesting+1].function_uaddr;
  445.   (void) l;
  446.   #define CONTEXT c
  447.   #define THIS l
  448.   #define STAP_RETVALUE THIS->__retvalue
  449.   c->last_stmt = "identifier 'uaddr' at /usr/local/share/systemtap/tapset/linux/context.stp:373:10";
  450.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  451.     c->last_error = "MAXNESTING exceeded";
  452.     return;
  453.   } else {
  454.     c->nesting ++;
  455.   }
  456.   l->__retvalue = 0;
  457.   #define return goto out
  458.   assert_is_myproc();
  459.   {
  460.      /* pure */ /* myproc-unprivileged */
  461.   struct pt_regs *uregs;
  462.  
  463.   if (CONTEXT->user_mode_p)
  464.     uregs = CONTEXT->uregs;
  465.   else
  466.     uregs = _stp_current_pt_regs();
  467.  
  468.   if (uregs)
  469.     STAP_RETVALUE = (int64_t) REG_IP(uregs);
  470.   else
  471.     STAP_RETVALUE = 0;
  472.  
  473.   }
  474.   #undef return
  475. out:
  476.   if (0) goto out;
  477.   c->nesting --;
  478.   #undef CONTEXT
  479.   #undef THIS
  480.   #undef STAP_RETVALUE
  481. }
  482.  
  483.  
  484. static void function_user_mode (struct context* __restrict__ c) {
  485.   __label__ out;
  486.   struct function_user_mode_locals *  __restrict__ l = & c->locals[c->nesting+1].function_user_mode;
  487.   (void) l;
  488.   #define CONTEXT c
  489.   #define THIS l
  490.   #define STAP_RETVALUE THIS->__retvalue
  491.   c->last_stmt = "identifier 'user_mode' at /usr/local/share/systemtap/tapset/linux/context.stp:218:10";
  492.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  493.     c->last_error = "MAXNESTING exceeded";
  494.     return;
  495.   } else {
  496.     c->nesting ++;
  497.   }
  498.   l->__retvalue = 0;
  499.   #define return goto out
  500.   {
  501.      /* pure */ /* unprivileged */
  502.   STAP_RETVALUE = CONTEXT->user_mode_p ? 1 : 0;
  503.  
  504.   }
  505.   #undef return
  506. out:
  507.   if (0) goto out;
  508.   c->nesting --;
  509.   #undef CONTEXT
  510.   #undef THIS
  511.   #undef STAP_RETVALUE
  512. }
  513.  
  514.  
  515. static void function_usymname (struct context* __restrict__ c) {
  516.   __label__ out;
  517.   struct function_usymname_locals *  __restrict__ l = & c->locals[c->nesting+1].function_usymname;
  518.   (void) l;
  519.   #define CONTEXT c
  520.   #define THIS l
  521.   #define STAP_ARG_addr THIS->l_addr
  522.   #define STAP_RETVALUE THIS->__retvalue
  523.   c->last_stmt = "identifier 'usymname' at /usr/local/share/systemtap/tapset/linux/ucontext-symbols.stp:57:10";
  524.   if (unlikely (c->nesting+1 >= MAXNESTING)) {
  525.     c->last_error = "MAXNESTING exceeded";
  526.     return;
  527.   } else {
  528.     c->nesting ++;
  529.   }
  530.   l->__retvalue[0] = '\0';
  531.   #define return goto out
  532.   assert_is_myproc();
  533.   {
  534.    
  535. /* pure */ /* myproc-unprivileged */ /* pragma:vma */ /* pragma:symbols */
  536.      _stp_snprint_addr(STAP_RETVALUE, MAXSTRINGLEN, STAP_ARG_addr,
  537.                _STP_SYM_SYMBOL, current);
  538.  
  539.   }
  540.   #undef return
  541. out:
  542.   if (0) goto out;
  543.   c->nesting --;
  544.   #undef CONTEXT
  545.   #undef THIS
  546.   #undef STAP_ARG_addr
  547.   #undef STAP_RETVALUE
  548. }
  549.  
  550.  
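/*
 * Handler shared by every kernel.function("vfs_*").call probe point
 * (poll_map.stp:8): with the write lock on the 'called' map held, it
 * performs called[execname(), probefunc()]++ (poll_map.stp:9:2).
 */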
  551. static void probe_2025 (struct context * __restrict__ c) {
  552.   __label__ out;
  553.   static const struct stp_probe_lock locks[] = {
  554.     {
  555.       .lock = global_lock(s_called),
  556.       .write_p = 1,
  557.       #ifdef STP_TIMING
  558.       .skipped = global_skipped(s_called),
  559.       #endif
  560.     },
  561.   };
  562.   struct probe_2025_locals * __restrict__ l = & c->probe_locals.probe_2025;
  563.   (void) l;
  564.   #if ! STP_PRIVILEGE_CONTAINS (STP_PRIVILEGE, STP_PR_STAPDEV) && \
  565.       ! STP_PRIVILEGE_CONTAINS (STP_PRIVILEGE, STP_PR_STAPSYS)
  566.   #error Internal Error: Probe kernel.function("vfs_truncate@fs/open.c:65").call generated in --unprivileged mode
  567.   #endif
  568.   if (!stp_lock_probe(locks, ARRAY_SIZE(locks)))
  569.     return;
  570.   (void)
  571.   ({
  572.     strlcpy (l->__tmp0,
  573.     ({
  574.       function_execname (c);
  575.       if (unlikely(c->last_error)) goto out;
  576.       c->locals[c->nesting+1].function_execname.__retvalue;
  577.     }), MAXSTRINGLEN);
  578.     strlcpy (l->__tmp1,
  579.     ({
  580.       function_probefunc (c);
  581.       if (unlikely(c->last_error)) goto out;
  582.       c->locals[c->nesting+1].function_probefunc.__retvalue;
  583.     }), MAXSTRINGLEN);
  584.     c->last_stmt = "identifier 'called' at ./systemtap.base/poll_map.stp:9:2";
  585.     l->__tmp3 = _stp_map_get_ssi (global(s_called), l->__tmp0, l->__tmp1);
  586.     l->__tmp4 = l->__tmp3;
  587.     l->__tmp3 += 1;
  588.     { int rc = _stp_map_set_ssi (global(s_called), l->__tmp0, l->__tmp1, l->__tmp3); if (unlikely(rc)) { c->last_error = "Array overflow, check MAXMAPENTRIES"; goto out; }};
  589.     l->__tmp4;
  590.   });
  591.   c->actionremaining -= 1;
  592.   if (unlikely (c->actionremaining <= 0)) {
  593.     c->last_error = "MAXACTION exceeded";
  594.     c->last_stmt = "identifier 'called' at ./systemtap.base/poll_map.stp:9:2";
  595.     goto out;
  596.   }
  597. out:
  598.   stp_unlock_probe(locks, ARRAY_SIZE(locks));
  599.   _stp_print_flush();
  600. }
  601.  
  602.  
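/*
 * Handler for the timer.ms(100) probe (poll_map.stp:12): with both globals
 * write-locked it sorts the 'called' map by value (highest count first,
 * matching "foreach ... in called-"), prints at most the top 10 entries,
 * clears the map, increments num_polls, and exits once more than 30 polls
 * have run.
 */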
  603. static void probe_2066 (struct context * __restrict__ c) {
  604.   __label__ out;
  605.   static const struct stp_probe_lock locks[] = {
  606.     {
  607.       .lock = global_lock(s_called),
  608.       .write_p = 1,
  609.       #ifdef STP_TIMING
  610.       .skipped = global_skipped(s_called),
  611.       #endif
  612.     },
  613.     {
  614.       .lock = global_lock(s_num_polls),
  615.       .write_p = 1,
  616.       #ifdef STP_TIMING
  617.       .skipped = global_skipped(s_num_polls),
  618.       #endif
  619.     },
  620.   };
  621.   struct probe_2066_locals * __restrict__ l = & c->probe_locals.probe_2066;
  622.   (void) l;
  623.   if (!stp_lock_probe(locks, ARRAY_SIZE(locks)))
  624.     return;
  625.   l->l_num_to_do = 0;
  626.   l->l_n[0] = '\0';
  627.   l->l_f[0] = '\0';
  628.   {
  629.     (void)
  630.     ({
  631.       _stp_print ("\n\n");
  632.     });
  633.    
  634.     (void)
  635.     ({
  636.       l->l_num_to_do = ((int64_t)10LL);
  637.       ((int64_t)10LL);
  638.     });
  639.    
  640.     _stp_map_sort_ssi (global(s_called), 0, 1);
  641.     l->__tmp4 = _stp_map_start (global(s_called));
  642.     c->actionremaining -= 3;
  643.     if (unlikely (c->actionremaining <= 0)) {
  644.       c->last_error = "MAXACTION exceeded";
  645.       c->last_stmt = "keyword at ./systemtap.base/poll_map.stp:16:2";
  646.       goto out;
  647.     }
  648.   top_0:
  649.     if (! (l->__tmp4)) goto break_0;
  650.     {
  651.       strlcpy (l->l_n, (_stp_map_key_get_str_ssi (l->__tmp4, 1) ?: ""), MAXSTRINGLEN);
  652.       strlcpy (l->l_f, (_stp_map_key_get_str_ssi (l->__tmp4, 2) ?: ""), MAXSTRINGLEN);
  653.       {
  654.         (void)
  655.         ({
  656.           strlcpy (l->__tmp5, l->l_n, MAXSTRINGLEN);
  657.           strlcpy (l->__tmp6, l->l_f, MAXSTRINGLEN);
  658.           l->__tmp7 = _stp_map_get_int64_ssi (l->__tmp4);
  659.           #ifndef STP_LEGACY_PRINT
  660.             c->printf_locals.stp_printf_2.arg0 = l->__tmp5;
  661.             c->printf_locals.stp_printf_2.arg1 = l->__tmp6;
  662.             c->printf_locals.stp_printf_2.arg2 = l->__tmp7;
  663.             stp_printf_2 (c);
  664.           #else // STP_LEGACY_PRINT
  665.             _stp_printf ("%s called %s\t%lld times\n", l->__tmp5, l->__tmp6, l->__tmp7);
  666.           #endif // STP_LEGACY_PRINT
  667.           if (unlikely(c->last_error)) goto out;
  668.           ((int64_t)0LL);
  669.         });
  670.        
  671.         (void)
  672.         ({
  673.           l->__tmp10 = l->l_num_to_do;
  674.           l->l_num_to_do -= 1;
  675.           l->__tmp10;
  676.         });
  677.        
  678.         c->actionremaining -= 4;
  679.         if (unlikely (c->actionremaining <= 0)) {
  680.           c->last_error = "MAXACTION exceeded";
  681.           c->last_stmt = "keyword at ./systemtap.base/poll_map.stp:19:3";
  682.           goto out;
  683.         }
  684.         if ((((l->l_num_to_do) <= (((int64_t)0LL))))) {
  685.           c->actionremaining -= 1;
  686.           if (unlikely (c->actionremaining <= 0)) {
  687.             c->last_error = "MAXACTION exceeded";
  688.             c->last_stmt = "keyword at ./systemtap.base/poll_map.stp:20:4";
  689.             goto out;
  690.           }
  691.           goto break_0;
  692.         }
  693.        
  694.       }
  695.     }
  696.   continue_0:
  697.     l->__tmp4 = _stp_map_iter (global(s_called), l->__tmp4);
  698.     goto top_0;
  699.   break_0:
  700.     ; /* dummy statement */
  701.    
  702.     _stp_map_clear (global(s_called));
  703.    
  704.     (void)
  705.     ({
  706.       l->__tmp12 = global(s_num_polls);
  707.       global(s_num_polls) += 1;
  708.       l->__tmp12;
  709.     });
  710.    
  711.     c->actionremaining -= 3;
  712.     if (unlikely (c->actionremaining <= 0)) {
  713.       c->last_error = "MAXACTION exceeded";
  714.       c->last_stmt = "keyword at ./systemtap.base/poll_map.stp:24:2";
  715.       goto out;
  716.     }
  717.     if ((((global(s_num_polls)) > (((int64_t)30LL))))) {
  718.       (void)
  719.       ({
  720.         function_exit (c);
  721.         if (unlikely(c->last_error)) goto out;
  722.         (void) 0;
  723.       });
  724.       c->actionremaining -= 1;
  725.       if (unlikely (c->actionremaining <= 0)) {
  726.         c->last_error = "MAXACTION exceeded";
  727.         c->last_stmt = "identifier 'exit' at ./systemtap.base/poll_map.stp:25:3";
  728.         goto out;
  729.       }
  730.     }
  731.    
  732.   }
  733. out:
  734.   stp_unlock_probe(locks, ARRAY_SIZE(locks));
  735.   _stp_print_flush();
  736. }
  737.  
  738.  
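/*
 * Handler for the end probe (poll_map.stp:28): prints "FAIL" if 30 or fewer
 * timer polls ran (num_polls <= 30), otherwise "SUCCESS".
 */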
  739. static void probe_2067 (struct context * __restrict__ c) {
  740.   __label__ out;
  741.   struct probe_2067_locals * __restrict__ l = & c->probe_locals.probe_2067;
  742.   (void) l;
  743.   c->actionremaining -= 1;
  744.   if (unlikely (c->actionremaining <= 0)) {
  745.     c->last_error = "MAXACTION exceeded";
  746.     c->last_stmt = "keyword at ./systemtap.base/poll_map.stp:29:2";
  747.     goto out;
  748.   }
  749.   if ((((global(s_num_polls)) <= (((int64_t)30LL))))) {
  750.     (void)
  751.     ({
  752.       _stp_print ("FAIL\n");
  753.     });
  754.     c->actionremaining -= 1;
  755.     if (unlikely (c->actionremaining <= 0)) {
  756.       c->last_error = "MAXACTION exceeded";
  757.       c->last_stmt = "identifier 'print' at ./systemtap.base/poll_map.stp:30:3";
  758.       goto out;
  759.     }
  760.   }
  761.   else {
  762.     (void)
  763.     ({
  764.       _stp_print ("SUCCESS\n");
  765.     });
  766.     c->actionremaining -= 1;
  767.     if (unlikely (c->actionremaining <= 0)) {
  768.       c->last_error = "MAXACTION exceeded";
  769.       c->last_stmt = "identifier 'print' at ./systemtap.base/poll_map.stp:32:3";
  770.       goto out;
  771.     }
  772.   }
  773. out:
  774.   _stp_print_flush();
  775. }
  776.  
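/*
 * Master probe table: entries 0-40 are the individual expansions of
 * kernel.function("vfs_*").call, all sharing the probe_2025 handler;
 * entry 41 is the timer.ms(100) probe (probe_2066) and entry 42 the
 * end probe (probe_2067).
 */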
  777. struct stap_probe {
  778.   size_t index;
  779.   void (* const ph) (struct context*);
  780.   #if defined(STP_TIMING) || defined(STP_ALIBI)
  781.   const char location[37];
  782.   const char derivation[41];
  783.   #define STAP_PROBE_INIT_TIMING(L, D) .location=(L), .derivation=(D),
  784.   #else
  785.   #define STAP_PROBE_INIT_TIMING(L, D)
  786.   #endif
  787.   const char * const pp;
  788.   #ifdef STP_NEED_PROBE_NAME
  789.   const char * const pn;
  790.   #define STAP_PROBE_INIT_NAME(PN) .pn=(PN),
  791.   #else
  792.   #define STAP_PROBE_INIT_NAME(PN)
  793.   #endif
  794.   #define STAP_PROBE_INIT(I, PH, PP, PN, L, D) { .index=(I), .ph=(PH), .pp=(PP), STAP_PROBE_INIT_NAME(PN) STAP_PROBE_INIT_TIMING(L, D) }
  795. } static const stap_probes[] = {
  796.   STAP_PROBE_INIT(0, &probe_2025, "kernel.function(\"vfs_truncate@fs/open.c:65\").call", "kernel.function(\"vfs_truncate@fs/open.c:65\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  797.   STAP_PROBE_INIT(1, &probe_2025, "kernel.function(\"vfs_readv@fs/read_write.c:779\").call", "kernel.function(\"vfs_readv@fs/read_write.c:779\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  798.   STAP_PROBE_INIT(2, &probe_2025, "kernel.function(\"vfs_write@fs/read_write.c:459\").call", "kernel.function(\"vfs_write@fs/read_write.c:459\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  799.   STAP_PROBE_INIT(3, &probe_2025, "kernel.function(\"vfs_setpos@fs/read_write.c:56\").call", "kernel.function(\"vfs_setpos@fs/read_write.c:56\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  800.   STAP_PROBE_INIT(4, &probe_2025, "kernel.function(\"vfs_read@fs/read_write.c:382\").call", "kernel.function(\"vfs_read@fs/read_write.c:382\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  801.   STAP_PROBE_INIT(5, &probe_2025, "kernel.function(\"vfs_llseek@fs/read_write.c:254\").call", "kernel.function(\"vfs_llseek@fs/read_write.c:254\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  802.   STAP_PROBE_INIT(6, &probe_2025, "kernel.function(\"vfs_writev@fs/read_write.c:792\").call", "kernel.function(\"vfs_writev@fs/read_write.c:792\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  803.   STAP_PROBE_INIT(7, &probe_2025, "kernel.function(\"vfs_stat@fs/stat.c:102\").call", "kernel.function(\"vfs_stat@fs/stat.c:102\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  804.   STAP_PROBE_INIT(8, &probe_2025, "kernel.function(\"vfs_lstat@fs/stat.c:108\").call", "kernel.function(\"vfs_lstat@fs/stat.c:108\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  805.   STAP_PROBE_INIT(9, &probe_2025, "kernel.function(\"vfs_getattr@fs/stat.c:40\").call", "kernel.function(\"vfs_getattr@fs/stat.c:40\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  806.   STAP_PROBE_INIT(10, &probe_2025, "kernel.function(\"vfs_fstatat@fs/stat.c:71\").call", "kernel.function(\"vfs_fstatat@fs/stat.c:71\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  807.   STAP_PROBE_INIT(11, &probe_2025, "kernel.function(\"vfs_fstat@fs/stat.c:58\").call", "kernel.function(\"vfs_fstat@fs/stat.c:58\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  808.   STAP_PROBE_INIT(12, &probe_2025, "kernel.function(\"vfs_mknod@fs/namei.c:3165\").call", "kernel.function(\"vfs_mknod@fs/namei.c:3165\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  809.   STAP_PROBE_INIT(13, &probe_2025, "kernel.function(\"vfs_symlink@fs/namei.c:3548\").call", "kernel.function(\"vfs_symlink@fs/namei.c:3548\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  810.   STAP_PROBE_INIT(14, &probe_2025, "kernel.function(\"vfs_readlink@fs/namei.c:3984\").call", "kernel.function(\"vfs_readlink@fs/namei.c:3984\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  811.   STAP_PROBE_INIT(15, &probe_2025, "kernel.function(\"vfs_link@fs/namei.c:3604\").call", "kernel.function(\"vfs_link@fs/namei.c:3604\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  812.   STAP_PROBE_INIT(16, &probe_2025, "kernel.function(\"vfs_rmdir@fs/namei.c:3335\").call", "kernel.function(\"vfs_rmdir@fs/namei.c:3335\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  813.   STAP_PROBE_INIT(17, &probe_2025, "kernel.function(\"vfs_follow_link@fs/namei.c:4023\").call", "kernel.function(\"vfs_follow_link@fs/namei.c:4023\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  814.   STAP_PROBE_INIT(18, &probe_2025, "kernel.function(\"vfs_rename@fs/namei.c:3834\").call", "kernel.function(\"vfs_rename@fs/namei.c:3834\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  815.   STAP_PROBE_INIT(19, &probe_2025, "kernel.function(\"vfs_create@fs/namei.c:2314\").call", "kernel.function(\"vfs_create@fs/namei.c:2314\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  816.   STAP_PROBE_INIT(20, &probe_2025, "kernel.function(\"vfs_path_lookup@fs/namei.c:2057\").call", "kernel.function(\"vfs_path_lookup@fs/namei.c:2057\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  817.   STAP_PROBE_INIT(21, &probe_2025, "kernel.function(\"vfs_path_lookup@fs/namei.c:2057\").call", "kernel.function(\"vfs_path_lookup@fs/namei.c:2057\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  818.   STAP_PROBE_INIT(22, &probe_2025, "kernel.function(\"vfs_mkdir@fs/namei.c:3256\").call", "kernel.function(\"vfs_mkdir@fs/namei.c:3256\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  819.   STAP_PROBE_INIT(23, &probe_2025, "kernel.function(\"vfs_unlink@fs/namei.c:3434\").call", "kernel.function(\"vfs_unlink@fs/namei.c:3434\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  820.   STAP_PROBE_INIT(24, &probe_2025, "kernel.function(\"vfs_caches_init@fs/dcache.c:3065\").call", "kernel.function(\"vfs_caches_init@fs/dcache.c:3065\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  821.   STAP_PROBE_INIT(25, &probe_2025, "kernel.function(\"vfs_caches_init_early@fs/dcache.c:3059\").call", "kernel.function(\"vfs_caches_init_early@fs/dcache.c:3059\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  822.   STAP_PROBE_INIT(26, &probe_2025, "kernel.function(\"vfs_kern_mount@fs/namespace.c:775\").call", "kernel.function(\"vfs_kern_mount@fs/namespace.c:775\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  823.   STAP_PROBE_INIT(27, &probe_2025, "kernel.function(\"vfs_setxattr@fs/xattr.c:122\").call", "kernel.function(\"vfs_setxattr@fs/xattr.c:122\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  824.   STAP_PROBE_INIT(28, &probe_2025, "kernel.function(\"vfs_listxattr@fs/xattr.c:267\").call", "kernel.function(\"vfs_listxattr@fs/xattr.c:267\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  825.   STAP_PROBE_INIT(29, &probe_2025, "kernel.function(\"vfs_xattr_cmp@fs/xattr.c:212\").call", "kernel.function(\"vfs_xattr_cmp@fs/xattr.c:212\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  826.   STAP_PROBE_INIT(30, &probe_2025, "kernel.function(\"vfs_getxattr@fs/xattr.c:231\").call", "kernel.function(\"vfs_getxattr@fs/xattr.c:231\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  827.   STAP_PROBE_INIT(31, &probe_2025, "kernel.function(\"vfs_removexattr@fs/xattr.c:287\").call", "kernel.function(\"vfs_removexattr@fs/xattr.c:287\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  828.   STAP_PROBE_INIT(32, &probe_2025, "kernel.function(\"vfs_getxattr_alloc@fs/xattr.c:181\").call", "kernel.function(\"vfs_getxattr_alloc@fs/xattr.c:181\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  829.   STAP_PROBE_INIT(33, &probe_2025, "kernel.function(\"vfs_fsync@fs/sync.c:194\").call", "kernel.function(\"vfs_fsync@fs/sync.c:194\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  830.   STAP_PROBE_INIT(34, &probe_2025, "kernel.function(\"vfs_fsync_range@fs/sync.c:178\").call", "kernel.function(\"vfs_fsync_range@fs/sync.c:178\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  831.   STAP_PROBE_INIT(35, &probe_2025, "kernel.function(\"vfs_statfs@fs/statfs.c:66\").call", "kernel.function(\"vfs_statfs@fs/statfs.c:66\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  832.   STAP_PROBE_INIT(36, &probe_2025, "kernel.function(\"vfs_ustat@fs/statfs.c:216\").call", "kernel.function(\"vfs_ustat@fs/statfs.c:216\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  833.   STAP_PROBE_INIT(37, &probe_2025, "kernel.function(\"vfs_setlease@fs/locks.c:1615\").call", "kernel.function(\"vfs_setlease@fs/locks.c:1615\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  834.   STAP_PROBE_INIT(38, &probe_2025, "kernel.function(\"vfs_lock_file@fs/locks.c:1910\").call", "kernel.function(\"vfs_lock_file@fs/locks.c:1910\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  835.   STAP_PROBE_INIT(39, &probe_2025, "kernel.function(\"vfs_cancel_lock@fs/locks.c:2247\").call", "kernel.function(\"vfs_cancel_lock@fs/locks.c:2247\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  836.   STAP_PROBE_INIT(40, &probe_2025, "kernel.function(\"vfs_test_lock@fs/locks.c:1798\").call", "kernel.function(\"vfs_test_lock@fs/locks.c:1798\").call", "./systemtap.base/poll_map.stp:8:1", " from: kernel.function(\"vfs_*\").call"),
  837.   STAP_PROBE_INIT(41, &probe_2066, "timer.ms(100)", "timer.ms(100)", "./systemtap.base/poll_map.stp:12:1", " from: timer.ms(100)"),
  838.   STAP_PROBE_INIT(42, &probe_2067, "end", "end", "./systemtap.base/poll_map.stp:28:1", " from: end"),
  839. };
  840.  
  841. /* ---- begin/end/error probes ---- */
  842. static struct stap_be_probe {
  843.   const struct stap_probe * const probe;
  844.   int state, type;
  845. } stap_be_probes[] = {
  846.   { .probe=(&stap_probes[42]), .state=STAP_SESSION_STOPPING, .type=1 },
  847. };
  848. static void enter_be_probe (struct stap_be_probe *stp) {
  849.   #ifdef STP_ALIBI
  850.   atomic_inc(probe_alibi(stp->probe->index));
  851.   #else
  852.   struct context* __restrict__ c;
  853.   #if !INTERRUPTIBLE
  854.   unsigned long flags;
  855.   #endif
  856.   #ifdef STP_TIMING
  857.   Stat stat = probe_timing(stp->probe->index);
  858.   #endif
  859.   #ifdef STP_TIMING
  860.   cycles_t cycles_atstart = get_cycles ();
  861.   #endif
  862.   #if INTERRUPTIBLE
  863.   preempt_disable ();
  864.   #else
  865.   local_irq_save (flags);
  866.   #endif
  867.   if (unlikely ((((unsigned long) (& c)) & (THREAD_SIZE-1))
  868.     < (MINSTACKSPACE + sizeof (struct thread_info)))) {
  869.     atomic_inc (skipped_count());
  870.     #ifdef STP_TIMING
  871.     atomic_inc (skipped_count_lowstack());
  872.     #endif
  873.     goto probe_epilogue;
  874.   }
  875.   if (atomic_read (session_state()) != stp->state)
  876.     goto probe_epilogue;
  877.   c = _stp_runtime_entryfn_get_context();
  878.   if (atomic_inc_return (& c->busy) != 1) {
  879.     #if !INTERRUPTIBLE
  880.     atomic_inc (skipped_count());
  881.     #endif
  882.     #ifdef STP_TIMING
  883.     atomic_inc (skipped_count_reentrant());
  884.     #ifdef DEBUG_REENTRANCY
  885.     _stp_warn ("Skipped %s due to %s residency on cpu %u\n", stp->probe->pp, c->probe_point ?: "?", smp_processor_id());
  886.     #endif
  887.     #endif
  888.     atomic_dec (& c->busy);
  889.     goto probe_epilogue;
  890.   }
  891.  
  892.   c->last_stmt = 0;
  893.   c->last_error = 0;
  894.   c->nesting = -1;
  895.   c->uregs = 0;
  896.   c->kregs = 0;
  897.   #if defined __ia64__
  898.   c->unwaddr = 0;
  899.   #endif
  900.   c->probe_point = stp->probe->pp;
  901.   #ifdef STP_NEED_PROBE_NAME
  902.   c->probe_name = stp->probe->pn;
  903.   #endif
  904.   c->probe_type = stp_probe_type_been;
  905.   memset(&c->ips, 0, sizeof(c->ips));
  906.   c->user_mode_p = 0; c->full_uregs_p = 0;
  907.   #ifdef STAP_NEED_REGPARM
  908.   c->regparm = 0;
  909.   #endif
  910.   #if INTERRUPTIBLE
  911.   c->actionremaining = MAXACTION_INTERRUPTIBLE;
  912.   #else
  913.   c->actionremaining = MAXACTION;
  914.   #endif
  915.   #if defined(STP_NEED_UNWIND_DATA)
  916.   c->uwcache_user.state = uwcache_uninitialized;
  917.   c->uwcache_kernel.state = uwcache_uninitialized;
  918.   #endif
  919.   (*stp->probe->ph) (c);
  920.   #ifdef STP_TIMING
  921.   {
  922.     cycles_t cycles_atend = get_cycles ();
  923.     int32_t cycles_elapsed = ((int32_t)cycles_atend > (int32_t)cycles_atstart)
  924.       ? ((int32_t)cycles_atend - (int32_t)cycles_atstart)
  925.       : (~(int32_t)0) - (int32_t)cycles_atstart + (int32_t)cycles_atend + 1;
  926.     #ifdef STP_TIMING
  927.     if (likely (stat)) _stp_stat_add(stat, cycles_elapsed);
  928.     #endif
  929.   }
  930.   #endif
  931.   c->probe_point = 0;
  932.   #ifdef STP_NEED_PROBE_NAME
  933.   c->probe_name = 0;
  934.   #endif
  935.   c->probe_type = 0;
  936.   if (unlikely (c->last_error && c->last_error[0])) {
  937.     if (c->last_stmt != NULL)
  938.       _stp_softerror ("%s near %s", c->last_error, c->last_stmt);
  939.     else
  940.       _stp_softerror ("%s", c->last_error);
  941.     atomic_inc (error_count());
  942.     if (atomic_read (error_count()) > MAXERRORS) {
  943.       atomic_set (session_state(), STAP_SESSION_ERROR);
  944.       _stp_exit ();
  945.     }
  946.   }
  947.   atomic_dec (&c->busy);
  948. probe_epilogue:
  949.   _stp_runtime_entryfn_put_context();
  950.   if (unlikely (atomic_read (skipped_count()) > MAXSKIPPED)) {
  951.     if (unlikely (pseudo_atomic_cmpxchg(session_state(), STAP_SESSION_RUNNING, STAP_SESSION_ERROR) == STAP_SESSION_RUNNING))
  952.     _stp_error ("Skipped too many probes, check MAXSKIPPED or try again with stap -t for more details.");
  953.   }
  954.   #if INTERRUPTIBLE
  955.   preempt_enable_no_resched ();
  956.   #else
  957.   local_irq_restore (flags);
  958.   #endif
  959.   #endif // STP_ALIBI
  960. }
  961. /* ---- dwarf probes ---- */
  962. #if ! defined(CONFIG_KPROBES)
  963. #error "Need CONFIG_KPROBES!"
  964. #endif
  965.  
  966. #ifndef KRETACTIVE
  967. #define KRETACTIVE (max(15,6*(int)num_possible_cpus()))
  968. #endif
  969. #include "linux/kprobes-common.h"
  970. static int enter_kprobe_probe (struct kprobe *inst, struct pt_regs *regs);
  971. static int enter_kretprobe_probe (struct kretprobe_instance *inst, struct pt_regs *regs);
  972. #if defined(STAPCONF_UNREGISTER_KPROBES)
  973. static void * stap_unreg_kprobes[41];
  974. #endif
  975. static struct stap_dwarf_kprobe stap_dwarf_kprobes[41];
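/*
 * One kprobe placement per vfs_* call site.  Each .address below is the
 * probe point's offset relative to the kernel's _stext section as resolved
 * at translation time for this particular kernel build; the runtime
 * presumably rebases these against the live kernel when the module loads.
 */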
  976. static struct stap_dwarf_probe {
  977.   const unsigned return_p:1;
  978.   const unsigned maxactive_p:1;
  979.   const unsigned optional_p:1;
  980.   unsigned registered_p:1;
  981.   const unsigned short maxactive_val;
  982.   const unsigned short saved_longs;
  983.   const unsigned short saved_strings;
  984.   const char module[7];
  985.   const char section[7];
  986.   const unsigned long address;
  987.   const struct stap_probe * const probe;
  988.   const struct stap_probe * const entry_probe;
  989. } stap_dwarf_probes[] = {
  990.   { .address=(unsigned long)0xf22ecULL, .module="kernel", .section="_stext", .probe=(&stap_probes[0]), },
  991.   { .address=(unsigned long)0xf4a2cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[1]), },
  992.   { .address=(unsigned long)0xf4004ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[2]), },
  993.   { .address=(unsigned long)0xf3348ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[3]), },
  994.   { .address=(unsigned long)0xf419cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[4]), },
  995.   { .address=(unsigned long)0xf33f4ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[5]), },
  996.   { .address=(unsigned long)0xf49acULL, .module="kernel", .section="_stext", .probe=(&stap_probes[6]), },
  997.   { .address=(unsigned long)0xf8860ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[7]), },
  998.   { .address=(unsigned long)0xf8834ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[8]), },
  999.   { .address=(unsigned long)0xf86dcULL, .module="kernel", .section="_stext", .probe=(&stap_probes[9]), },
  1000.   { .address=(unsigned long)0xf8790ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[10]), },
  1001.   { .address=(unsigned long)0xf872cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[11]), },
  1002.   { .address=(unsigned long)0xff378ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[12]), },
  1003.   { .address=(unsigned long)0xfe7c8ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[13]), },
  1004.   { .address=(unsigned long)0xfdc44ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[14]), },
  1005.   { .address=(unsigned long)0xff1d0ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[15]), },
  1006.   { .address=(unsigned long)0x100718ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[16]), },
  1007.   { .address=(unsigned long)0xff0fcULL, .module="kernel", .section="_stext", .probe=(&stap_probes[17]), },
  1008.   { .address=(unsigned long)0x100980ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[18]), },
  1009.   { .address=(unsigned long)0xfe624ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[19]), },
  1010.   { .address=(unsigned long)0x448460ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[20]), },
  1011.   { .address=(unsigned long)0x10029cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[21]), },
  1012.   { .address=(unsigned long)0xfe6e8ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[22]), },
  1013.   { .address=(unsigned long)0x100478ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[23]), },
  1014.   { .address=(unsigned long)0x654778ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[24]), },
  1015.   { .address=(unsigned long)0x6546d0ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[25]), },
  1016.   { .address=(unsigned long)0x11035cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[26]), },
  1017.   { .address=(unsigned long)0x115910ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[27]), },
  1018.   { .address=(unsigned long)0x114f70ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[28]), },
  1019.   { .address=(unsigned long)0x115c40ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[29]), },
  1020.   { .address=(unsigned long)0x1154f0ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[30]), },
  1021.   { .address=(unsigned long)0x1156d4ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[31]), },
  1022.   { .address=(unsigned long)0x115b4cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[32]), },
  1023.   { .address=(unsigned long)0x11f7b0ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[33]), },
  1024.   { .address=(unsigned long)0x11f75cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[34]), },
  1025.   { .address=(unsigned long)0x120cf8ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[35]), },
  1026.   { .address=(unsigned long)0x120f88ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[36]), },
  1027.   { .address=(unsigned long)0x13bfccULL, .module="kernel", .section="_stext", .probe=(&stap_probes[37]), },
  1028.   { .address=(unsigned long)0x13b95cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[38]), },
  1029.   { .address=(unsigned long)0x139ff0ULL, .module="kernel", .section="_stext", .probe=(&stap_probes[39]), },
  1030.   { .address=(unsigned long)0x13af5cULL, .module="kernel", .section="_stext", .probe=(&stap_probes[40]), },
  1031. };
  1032.  
  1033. static int enter_kprobe_probe (struct kprobe *inst, struct pt_regs *regs) {
  1034.   int kprobe_idx = ((uintptr_t)inst-(uintptr_t)stap_dwarf_kprobes)/sizeof(struct stap_dwarf_kprobe);
  1035.   struct stap_dwarf_probe *sdp = &stap_dwarf_probes[((kprobe_idx >= 0 && kprobe_idx < 41)?kprobe_idx:0)];
  1036.   #ifdef STP_ALIBI
  1037.   atomic_inc(probe_alibi(sdp->probe->index));
  1038.   #else
  1039.   struct context* __restrict__ c;
  1040.   #if !INTERRUPTIBLE
  1041.   unsigned long flags;
  1042.   #endif
  1043.   #ifdef STP_TIMING
  1044.   Stat stat = probe_timing(sdp->probe->index);
  1045.   #endif
  1046.   #if defined(STP_TIMING) || defined(STP_OVERLOAD)
  1047.   cycles_t cycles_atstart = get_cycles ();
  1048.   #endif
  1049.   #if INTERRUPTIBLE
  1050.   preempt_disable ();
  1051.   #else
  1052.   local_irq_save (flags);
  1053.   #endif
  1054.   if (unlikely ((((unsigned long) (& c)) & (THREAD_SIZE-1))
  1055.     < (MINSTACKSPACE + sizeof (struct thread_info)))) {
  1056.     atomic_inc (skipped_count());
  1057.     #ifdef STP_TIMING
  1058.     atomic_inc (skipped_count_lowstack());
  1059.     #endif
  1060.     goto probe_epilogue;
  1061.   }
  1062.   if (atomic_read (session_state()) != STAP_SESSION_RUNNING)
  1063.     goto probe_epilogue;
  1064.   c = _stp_runtime_entryfn_get_context();
  1065.   if (atomic_inc_return (& c->busy) != 1) {
  1066.     #if !INTERRUPTIBLE
  1067.     atomic_inc (skipped_count());
  1068.     #endif
  1069.     #ifdef STP_TIMING
  1070.     atomic_inc (skipped_count_reentrant());
  1071.     #ifdef DEBUG_REENTRANCY
  1072.     _stp_warn ("Skipped %s due to %s residency on cpu %u\n", sdp->probe->pp, c->probe_point ?: "?", smp_processor_id());
  1073.     #endif
  1074.     #endif
  1075.     atomic_dec (& c->busy);
  1076.     goto probe_epilogue;
  1077.   }
  1078.  
  1079.   c->last_stmt = 0;
  1080.   c->last_error = 0;
  1081.   c->nesting = -1;
  1082.   c->uregs = 0;
  1083.   c->kregs = 0;
  1084.   #if defined __ia64__
  1085.   c->unwaddr = 0;
  1086.   #endif
  1087.   c->probe_point = sdp->probe->pp;
  1088.   #ifdef STP_NEED_PROBE_NAME
  1089.   c->probe_name = sdp->probe->pn;
  1090.   #endif
  1091.   c->probe_type = stp_probe_type_kprobe;
  1092.   memset(&c->ips, 0, sizeof(c->ips));
  1093.   c->user_mode_p = 0; c->full_uregs_p = 0;
  1094.   #ifdef STAP_NEED_REGPARM
  1095.   c->regparm = 0;
  1096.   #endif
  1097.   #if INTERRUPTIBLE
  1098.   c->actionremaining = MAXACTION_INTERRUPTIBLE;
  1099.   #else
  1100.   c->actionremaining = MAXACTION;
  1101.   #endif
  1102.   #if defined(STP_NEED_UNWIND_DATA)
  1103.   c->uwcache_user.state = uwcache_uninitialized;
  1104.   c->uwcache_kernel.state = uwcache_uninitialized;
  1105.   #endif
  1106.   c->kregs = regs;
  1107.   {
  1108.     unsigned long kprobes_ip = REG_IP(c->kregs);
  1109.     SET_REG_IP(regs, (unsigned long) inst->addr);
  1110.     (*sdp->probe->ph) (c);
  1111.     SET_REG_IP(regs, kprobes_ip);
  1112.   }
  1113.   #if defined(STP_TIMING) || defined(STP_OVERLOAD)
  1114.   {
  1115.     cycles_t cycles_atend = get_cycles ();
  1116.     int32_t cycles_elapsed = ((int32_t)cycles_atend > (int32_t)cycles_atstart)
  1117.       ? ((int32_t)cycles_atend - (int32_t)cycles_atstart)
  1118.       : (~(int32_t)0) - (int32_t)cycles_atstart + (int32_t)cycles_atend + 1;
  1119.     #ifdef STP_TIMING
  1120.     if (likely (stat)) _stp_stat_add(stat, cycles_elapsed);
  1121.     #endif
  1122.     #ifdef STP_OVERLOAD
  1123.     {
  1124.       cycles_t interval = (cycles_atend > c->cycles_base)
  1125.         ? (cycles_atend - c->cycles_base)
  1126.         : (STP_OVERLOAD_INTERVAL + 1);
  1127.       c->cycles_sum += cycles_elapsed;
  1128.       if (interval > STP_OVERLOAD_INTERVAL) {
  1129.         if (c->cycles_sum > STP_OVERLOAD_THRESHOLD) {
  1130.           _stp_error ("probe overhead exceeded threshold");
  1131.           atomic_set (session_state(), STAP_SESSION_ERROR);
  1132.           atomic_inc (error_count());
  1133.         }
  1134.         c->cycles_base = cycles_atend;
  1135.         c->cycles_sum = 0;
  1136.       }
  1137.     }
  1138.     #endif
  1139.   }
  1140.   #endif
  1141.   c->probe_point = 0;
  1142.   #ifdef STP_NEED_PROBE_NAME
  1143.   c->probe_name = 0;
  1144.   #endif
  1145.   c->probe_type = 0;
  1146.   if (unlikely (c->last_error && c->last_error[0])) {
  1147.     if (c->last_stmt != NULL)
  1148.       _stp_softerror ("%s near %s", c->last_error, c->last_stmt);
  1149.     else
  1150.       _stp_softerror ("%s", c->last_error);
  1151.     atomic_inc (error_count());
  1152.     if (atomic_read (error_count()) > MAXERRORS) {
  1153.       atomic_set (session_state(), STAP_SESSION_ERROR);
  1154.       _stp_exit ();
  1155.     }
  1156.   }
  1157.   atomic_dec (&c->busy);
  1158. probe_epilogue:
  1159.   _stp_runtime_entryfn_put_context();
  1160.   if (unlikely (atomic_read (skipped_count()) > MAXSKIPPED)) {
  1161.     if (unlikely (pseudo_atomic_cmpxchg(session_state(), STAP_SESSION_RUNNING, STAP_SESSION_ERROR) == STAP_SESSION_RUNNING))
  1162.     _stp_error ("Skipped too many probes, check MAXSKIPPED or try again with stap -t for more details.");
  1163.   }
  1164.   #if INTERRUPTIBLE
  1165.   preempt_enable_no_resched ();
  1166.   #else
  1167.   local_irq_restore (flags);
  1168.   #endif
  1169.   #endif // STP_ALIBI
  1170.   return 0;
  1171. }
  1172.  
  1173. static int enter_kretprobe_common (struct kretprobe_instance *inst, struct pt_regs *regs, int entry) {
  1174.   struct kretprobe *krp = inst->rp;
  1175.   int kprobe_idx = ((uintptr_t)krp-(uintptr_t)stap_dwarf_kprobes)/sizeof(struct stap_dwarf_kprobe);
  1176.   struct stap_dwarf_probe *sdp = &stap_dwarf_probes[((kprobe_idx >= 0 && kprobe_idx < 41)?kprobe_idx:0)];
  1177.   const struct stap_probe *sp = entry ? sdp->entry_probe : sdp->probe;
  1178.   if (sp) {
  1179.     #ifdef STP_ALIBI
  1180.     atomic_inc(probe_alibi(sp->index));
  1181.     #else
  1182.     struct context* __restrict__ c;
  1183.     #if !INTERRUPTIBLE
  1184.     unsigned long flags;
  1185.     #endif
  1186.     #ifdef STP_TIMING
  1187.     Stat stat = probe_timing(sp->index);
  1188.     #endif
  1189.     #if defined(STP_TIMING) || defined(STP_OVERLOAD)
  1190.     cycles_t cycles_atstart = get_cycles ();
  1191.     #endif
  1192.     #if INTERRUPTIBLE
  1193.     preempt_disable ();
  1194.     #else
  1195.     local_irq_save (flags);
  1196.     #endif
  1197.     if (unlikely ((((unsigned long) (& c)) & (THREAD_SIZE-1))
  1198.       < (MINSTACKSPACE + sizeof (struct thread_info)))) {
  1199.       atomic_inc (skipped_count());
  1200.       #ifdef STP_TIMING
  1201.       atomic_inc (skipped_count_lowstack());
  1202.       #endif
  1203.       goto probe_epilogue;
  1204.     }
  1205.     if (atomic_read (session_state()) != STAP_SESSION_RUNNING)
  1206.       goto probe_epilogue;
  1207.     c = _stp_runtime_entryfn_get_context();
  1208.     if (atomic_inc_return (& c->busy) != 1) {
  1209.       #if !INTERRUPTIBLE
  1210.       atomic_inc (skipped_count());
  1211.       #endif
  1212.       #ifdef STP_TIMING
  1213.       atomic_inc (skipped_count_reentrant());
  1214.       #ifdef DEBUG_REENTRANCY
  1215.       _stp_warn ("Skipped %s due to %s residency on cpu %u\n", sp->pp, c->probe_point ?: "?", smp_processor_id());
  1216.       #endif
  1217.       #endif
  1218.       atomic_dec (& c->busy);
  1219.       goto probe_epilogue;
  1220.     }
  1221.    
  1222.     c->last_stmt = 0;
  1223.     c->last_error = 0;
  1224.     c->nesting = -1;
  1225.     c->uregs = 0;
  1226.     c->kregs = 0;
  1227.     #if defined __ia64__
  1228.     c->unwaddr = 0;
  1229.     #endif
  1230.     c->probe_point = sp->pp;
  1231.     #ifdef STP_NEED_PROBE_NAME
  1232.     c->probe_name = sp->pn;
  1233.     #endif
  1234.     c->probe_type = stp_probe_type_kretprobe;
  1235.     memset(&c->ips, 0, sizeof(c->ips));
  1236.     c->user_mode_p = 0; c->full_uregs_p = 0;
  1237.     #ifdef STAP_NEED_REGPARM
  1238.     c->regparm = 0;
  1239.     #endif
  1240.     #if INTERRUPTIBLE
  1241.     c->actionremaining = MAXACTION_INTERRUPTIBLE;
  1242.     #else
  1243.     c->actionremaining = MAXACTION;
  1244.     #endif
  1245.     #if defined(STP_NEED_UNWIND_DATA)
  1246.     c->uwcache_user.state = uwcache_uninitialized;
  1247.     c->uwcache_kernel.state = uwcache_uninitialized;
  1248.     #endif
  1249.     c->kregs = regs;
  1250.     c->ips.krp.pi = inst;
  1251.     c->ips.krp.pi_longs = sdp->saved_longs;
  1252.     {
  1253.       unsigned long kprobes_ip = REG_IP(c->kregs);
  1254.       if (entry)
  1255.         SET_REG_IP(regs, (unsigned long) inst->rp->kp.addr);
  1256.       else
  1257.         SET_REG_IP(regs, (unsigned long)inst->ret_addr);
  1258.       (sp->ph) (c);
  1259.       SET_REG_IP(regs, kprobes_ip);
  1260.     }
  1261.     #if defined(STP_TIMING) || defined(STP_OVERLOAD)
  1262.     {
  1263.       cycles_t cycles_atend = get_cycles ();
  1264.       int32_t cycles_elapsed = ((int32_t)cycles_atend > (int32_t)cycles_atstart)
  1265.         ? ((int32_t)cycles_atend - (int32_t)cycles_atstart)
  1266.         : (~(int32_t)0) - (int32_t)cycles_atstart + (int32_t)cycles_atend + 1;
  1267.       #ifdef STP_TIMING
  1268.       if (likely (stat)) _stp_stat_add(stat, cycles_elapsed);
  1269.       #endif
  1270.       #ifdef STP_OVERLOAD
  1271.       {
  1272.         cycles_t interval = (cycles_atend > c->cycles_base)
  1273.           ? (cycles_atend - c->cycles_base)
  1274.           : (STP_OVERLOAD_INTERVAL + 1);
  1275.         c->cycles_sum += cycles_elapsed;
  1276.         if (interval > STP_OVERLOAD_INTERVAL) {
  1277.           if (c->cycles_sum > STP_OVERLOAD_THRESHOLD) {
  1278.             _stp_error ("probe overhead exceeded threshold");
  1279.             atomic_set (session_state(), STAP_SESSION_ERROR);
  1280.             atomic_inc (error_count());
  1281.           }
  1282.           c->cycles_base = cycles_atend;
  1283.           c->cycles_sum = 0;
  1284.         }
  1285.       }
  1286.       #endif
  1287.     }
  1288.     #endif
  1289.     c->probe_point = 0;
  1290.     #ifdef STP_NEED_PROBE_NAME
  1291.     c->probe_name = 0;
  1292.     #endif
  1293.     c->probe_type = 0;
  1294.     if (unlikely (c->last_error && c->last_error[0])) {
  1295.       if (c->last_stmt != NULL)
  1296.         _stp_softerror ("%s near %s", c->last_error, c->last_stmt);
  1297.       else
  1298.         _stp_softerror ("%s", c->last_error);
  1299.       atomic_inc (error_count());
  1300.       if (atomic_read (error_count()) > MAXERRORS) {
  1301.         atomic_set (session_state(), STAP_SESSION_ERROR);
  1302.         _stp_exit ();
  1303.       }
  1304.     }
  1305.     atomic_dec (&c->busy);
  1306.   probe_epilogue:
  1307.     _stp_runtime_entryfn_put_context();
  1308.     if (unlikely (atomic_read (skipped_count()) > MAXSKIPPED)) {
  1309.       if (unlikely (pseudo_atomic_cmpxchg(session_state(), STAP_SESSION_RUNNING, STAP_SESSION_ERROR) == STAP_SESSION_RUNNING))
  1310.         _stp_error ("Skipped too many probes, check MAXSKIPPED or try again with stap -t for more details.");
  1311.     }
  1312.     #if INTERRUPTIBLE
  1313.     preempt_enable_no_resched ();
  1314.     #else
  1315.     local_irq_restore (flags);
  1316.     #endif
  1317.     #endif // STP_ALIBI
  1318.   }
  1319.   return 0;
  1320. }
  1321.  
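/*
 * enter_kretprobe_common() above does the real work for both the entry
 * and the return flavour of a kretprobe hit: it recovers the
 * stap_dwarf_probe from the kretprobe slot index, sets up a per-CPU
 * context, temporarily rewrites REG_IP so the script handler sees the
 * probed address (entry) or the return address (return), runs the
 * handler, and restores the register.  The two thin wrappers below are
 * what actually get registered with the kernel as the kretprobe
 * .entry_handler and .handler callbacks.
 */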
  1322. static int enter_kretprobe_probe (struct kretprobe_instance *inst, struct pt_regs *regs) {
  1323.   return enter_kretprobe_common(inst, regs, 0);
  1324. }
  1325.  
  1326. static int enter_kretprobe_entry_probe (struct kretprobe_instance *inst, struct pt_regs *regs) {
  1327.   return enter_kretprobe_common(inst, regs, 1);
  1328. }
  1329.  
  1330. /* ---- hrtimer probes ---- */
  1331. #include "timer.c"
  1332. static struct stap_hrtimer_probe stap_hrtimer_probes [1] = {
  1333.   { .probe=(&stap_probes[41]), .intrv=100000000LL, .rnd=0LL },
  1334. };
  1335.  
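/*
 * Timer callback for the single timer probe above (stap_probes[41],
 * interval .intrv = 100000000 ns, i.e. about every 100 ms, with no
 * randomization).  The timer only re-arms itself (HRTIMER_RESTART)
 * while the session is still STARTING or RUNNING; afterwards it is
 * allowed to lapse.  The body is the same context-setup / handler /
 * epilogue sequence used by the kprobe and kretprobe handlers above.
 */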
  1336. static hrtimer_return_t _stp_hrtimer_notify_function (struct hrtimer *timer) {
  1337.   int rc = HRTIMER_NORESTART;
  1338.   struct stap_hrtimer_probe *stp = container_of(timer, struct stap_hrtimer_probe, hrtimer);
  1339.   if ((atomic_read (session_state()) == STAP_SESSION_STARTING) ||
  1340.       (atomic_read (session_state()) == STAP_SESSION_RUNNING)) {
  1341.     _stp_hrtimer_update(stp);
  1342.     rc = HRTIMER_RESTART;
  1343.   }
  1344.   {
  1345.     #ifdef STP_ALIBI
  1346.     atomic_inc(probe_alibi(stp->probe->index));
  1347.     #else
  1348.     struct context* __restrict__ c;
  1349.     #if !INTERRUPTIBLE
  1350.     unsigned long flags;
  1351.     #endif
  1352.     #ifdef STP_TIMING
  1353.     Stat stat = probe_timing(stp->probe->index);
  1354.     #endif
  1355.     #if defined(STP_TIMING) || defined(STP_OVERLOAD)
  1356.     cycles_t cycles_atstart = get_cycles ();
  1357.     #endif
  1358.     #if INTERRUPTIBLE
  1359.     preempt_disable ();
  1360.     #else
  1361.     local_irq_save (flags);
  1362.     #endif
  1363.     if (unlikely ((((unsigned long) (& c)) & (THREAD_SIZE-1))
  1364.       < (MINSTACKSPACE + sizeof (struct thread_info)))) {
  1365.       atomic_inc (skipped_count());
  1366.       #ifdef STP_TIMING
  1367.       atomic_inc (skipped_count_lowstack());
  1368.       #endif
  1369.       goto probe_epilogue;
  1370.     }
  1371.     if (atomic_read (session_state()) != STAP_SESSION_RUNNING)
  1372.       goto probe_epilogue;
  1373.     c = _stp_runtime_entryfn_get_context();
  1374.     if (atomic_inc_return (& c->busy) != 1) {
  1375.       #if !INTERRUPTIBLE
  1376.       atomic_inc (skipped_count());
  1377.       #endif
  1378.       #ifdef STP_TIMING
  1379.       atomic_inc (skipped_count_reentrant());
  1380.       #ifdef DEBUG_REENTRANCY
  1381.       _stp_warn ("Skipped %s due to %s residency on cpu %u\n", stp->probe->pp, c->probe_point ?: "?", smp_processor_id());
  1382.       #endif
  1383.       #endif
  1384.       atomic_dec (& c->busy);
  1385.       goto probe_epilogue;
  1386.     }
  1387.    
  1388.     c->last_stmt = 0;
  1389.     c->last_error = 0;
  1390.     c->nesting = -1;
  1391.     c->uregs = 0;
  1392.     c->kregs = 0;
  1393.     #if defined __ia64__
  1394.     c->unwaddr = 0;
  1395.     #endif
  1396.     c->probe_point = stp->probe->pp;
  1397.     #ifdef STP_NEED_PROBE_NAME
  1398.     c->probe_name = stp->probe->pn;
  1399.     #endif
  1400.     c->probe_type = stp_probe_type_hrtimer;
  1401.     memset(&c->ips, 0, sizeof(c->ips));
  1402.     c->user_mode_p = 0; c->full_uregs_p = 0;
  1403.     #ifdef STAP_NEED_REGPARM
  1404.     c->regparm = 0;
  1405.     #endif
  1406.     #if INTERRUPTIBLE
  1407.     c->actionremaining = MAXACTION_INTERRUPTIBLE;
  1408.     #else
  1409.     c->actionremaining = MAXACTION;
  1410.     #endif
  1411.     #if defined(STP_NEED_UNWIND_DATA)
  1412.     c->uwcache_user.state = uwcache_uninitialized;
  1413.     c->uwcache_kernel.state = uwcache_uninitialized;
  1414.     #endif
  1415.     (*stp->probe->ph) (c);
  1416.     #if defined(STP_TIMING) || defined(STP_OVERLOAD)
  1417.     {
  1418.       cycles_t cycles_atend = get_cycles ();
  1419.       int32_t cycles_elapsed = ((int32_t)cycles_atend > (int32_t)cycles_atstart)
  1420.         ? ((int32_t)cycles_atend - (int32_t)cycles_atstart)
  1421.         : (~(int32_t)0) - (int32_t)cycles_atstart + (int32_t)cycles_atend + 1;
  1422.       #ifdef STP_TIMING
  1423.       if (likely (stat)) _stp_stat_add(stat, cycles_elapsed);
  1424.       #endif
  1425.       #ifdef STP_OVERLOAD
  1426.       {
  1427.         cycles_t interval = (cycles_atend > c->cycles_base)
  1428.           ? (cycles_atend - c->cycles_base)
  1429.           : (STP_OVERLOAD_INTERVAL + 1);
  1430.         c->cycles_sum += cycles_elapsed;
  1431.         if (interval > STP_OVERLOAD_INTERVAL) {
  1432.           if (c->cycles_sum > STP_OVERLOAD_THRESHOLD) {
  1433.             _stp_error ("probe overhead exceeded threshold");
  1434.             atomic_set (session_state(), STAP_SESSION_ERROR);
  1435.             atomic_inc (error_count());
  1436.           }
  1437.           c->cycles_base = cycles_atend;
  1438.           c->cycles_sum = 0;
  1439.         }
  1440.       }
  1441.       #endif
  1442.     }
  1443.     #endif
  1444.     c->probe_point = 0;
  1445.     #ifdef STP_NEED_PROBE_NAME
  1446.     c->probe_name = 0;
  1447.     #endif
  1448.     c->probe_type = 0;
  1449.     if (unlikely (c->last_error && c->last_error[0])) {
  1450.       if (c->last_stmt != NULL)
  1451.         _stp_softerror ("%s near %s", c->last_error, c->last_stmt);
  1452.       else
  1453.         _stp_softerror ("%s", c->last_error);
  1454.       atomic_inc (error_count());
  1455.       if (atomic_read (error_count()) > MAXERRORS) {
  1456.         atomic_set (session_state(), STAP_SESSION_ERROR);
  1457.         _stp_exit ();
  1458.       }
  1459.     }
  1460.     atomic_dec (&c->busy);
  1461.   probe_epilogue:
  1462.     _stp_runtime_entryfn_put_context();
  1463.     if (unlikely (atomic_read (skipped_count()) > MAXSKIPPED)) {
  1464.       if (unlikely (pseudo_atomic_cmpxchg(session_state(), STAP_SESSION_RUNNING, STAP_SESSION_ERROR) == STAP_SESSION_RUNNING))
  1465.         _stp_error ("Skipped too many probes, check MAXSKIPPED or try again with stap -t for more details.");
  1466.     }
  1467.     #if INTERRUPTIBLE
  1468.     preempt_enable_no_resched ();
  1469.     #else
  1470.     local_irq_restore (flags);
  1471.     #endif
  1472.     #endif // STP_ALIBI
  1473.   }
  1474.   return rc;
  1475. }
  1476.  
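/*
 * Session bring-up.  The order below matters: check the kernel release
 * and privilege credentials, initialize the session and per-CPU
 * contexts, allocate the 'called' map, report runtime/kernel info, run
 * the begin probe (type 0), register the 41 dwarf kprobes/kretprobes,
 * create the hrtimer, then start the vma tracker and task finder.  Any
 * failure unwinds the already-completed steps and jumps to 'out'.
 */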
  1477. static int systemtap_module_init (void) {
  1478.   int rc = 0;
  1479.   int cpu;
  1480.   int i=0, j=0;
  1481.   const char *probe_point = "";
  1482.   {
  1483.     const char* release = UTS_RELEASE;
  1484.     #ifdef STAPCONF_GENERATED_COMPILE
  1485.     const char* version = UTS_VERSION;
  1486.     #endif
  1487.     if (strcmp (release, "3.11.0-rc5")) {
  1488.       _stp_error ("module release mismatch (%s vs %s)", release, "3.11.0-rc5");
  1489.       rc = -EINVAL;
  1490.     }
  1491.     #ifdef STAPCONF_GENERATED_COMPILE
  1492.     if (strcmp (utsname()->version, version)) {
  1493.       _stp_error ("module version mismatch (%s vs %s), release %s", version, utsname()->version, release);
  1494.       rc = -EINVAL;
  1495.     }
  1496.     #endif
  1497.     if (_stp_module_check()) rc = -EINVAL;
  1498.     if (_stp_privilege_credentials == 0) {
  1499.       if (STP_PRIVILEGE_CONTAINS(STP_PRIVILEGE, STP_PR_STAPDEV) ||
  1500.           STP_PRIVILEGE_CONTAINS(STP_PRIVILEGE, STP_PR_STAPUSR)) {
  1501.         _stp_privilege_credentials = STP_PRIVILEGE;
  1502.         #ifdef DEBUG_PRIVILEGE
  1503.           _dbug("User's privilege credentials default to %s\n",
  1504.                 privilege_to_text(_stp_privilege_credentials));
  1505.         #endif
  1506.       }
  1507.       else {
  1508.         _stp_error ("Unable to verify that you have the required privilege credentials to run this module (%s required). You must use staprun version 1.7 or higher.",
  1509.                     privilege_to_text(STP_PRIVILEGE));
  1510.         rc = -EINVAL;
  1511.       }
  1512.     }
  1513.     else {
  1514.       #ifdef DEBUG_PRIVILEGE
  1515.         _dbug("User's privilege credentials provided as %s\n",
  1516.               privilege_to_text(_stp_privilege_credentials));
  1517.       #endif
  1518.       if (! STP_PRIVILEGE_CONTAINS(_stp_privilege_credentials, STP_PRIVILEGE)) {
  1519.         _stp_error ("Your privilege credentials (%s) are insufficient to run this module (%s required).",
  1520.                     privilege_to_text(_stp_privilege_credentials), privilege_to_text(STP_PRIVILEGE));
  1521.         rc = -EINVAL;
  1522.       }
  1523.     }
  1524.   }
  1525.   if (rc) goto out;
  1526.   rc = stp_session_init();
  1527.   if (rc) {
  1528.     _stp_error ("couldn't initialize the main session (rc %d)", rc);
  1529.     goto out;
  1530.   }
  1531.   #ifdef STAP_NEED_GETTIMEOFDAY
  1532.   rc = _stp_init_time();
  1533.   if (rc) {
  1534.     _stp_error ("couldn't initialize gettimeofday");
  1535.     goto out;
  1536.   }
  1537.   #endif
  1538.   (void) probe_point;
  1539.   (void) i;
  1540.   (void) j;
  1541.   atomic_set (session_state(), STAP_SESSION_STARTING);
  1542.   rc = _stp_runtime_contexts_alloc();
  1543.   if (rc != 0)
  1544.     goto out;
  1545.   global_set(s_called, _stp_map_new_ssi (MAXMAPENTRIES, 0)); if (global(s_called) == NULL) rc = -ENOMEM;
  1546.   if (rc) {
  1547.     _stp_error ("global variable 'called' allocation failed");
  1548.     goto out;
  1549.   }
  1550.   global_lock_init(s_called);
  1551.   #ifdef STP_TIMING
  1552.   atomic_set(global_skipped(s_called), 0);
  1553.   #endif
  1554.  
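  /* 'num_polls' is a scalar global (exposed as a module parameter at the
     end of this file), so unlike 'called' it needs no map allocation;
     the rc check below appears to be translator boilerplate and cannot
     trigger here, since a nonzero rc would already have jumped to 'out'. */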
  1555.   if (rc) {
  1556.     _stp_error ("global variable 'num_polls' allocation failed");
  1557.     goto out;
  1558.   }
  1559.   global_lock_init(s_num_polls);
  1560.   #ifdef STP_TIMING
  1561.   atomic_set(global_skipped(s_num_polls), 0);
  1562.   #endif
  1563.   _stp_print_kernel_info("2.2.1/0.155", (num_online_cpus() * sizeof(struct context)), 43);
  1564.   for (i=0; i<1; i++) {
  1565.     struct stap_be_probe* stp = & stap_be_probes [i];
  1566.     if (stp->type == 0)
  1567.       enter_be_probe (stp); /* rc = 0 */
  1568.   }
  1569.   if (rc) {
  1570.     if (probe_point)
  1571.       _stp_error ("probe %s registration error (rc %d)", probe_point, rc);
  1572.     atomic_set (session_state(), STAP_SESSION_ERROR);
  1573.     goto out;
  1574.   }
  1575.   for (i=0; i<41; i++) {
  1576.     struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1577.     struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1578.     unsigned long relocated_addr = _stp_kmodule_relocate (sdp->module, sdp->section, sdp->address);
  1579.     if (relocated_addr == 0) continue;
  1580.     probe_point = sdp->probe->pp;
  1581.     if (sdp->return_p) {
  1582.       kp->u.krp.kp.addr = (void *) relocated_addr;
  1583.       if (sdp->maxactive_p) {
  1584.         kp->u.krp.maxactive = sdp->maxactive_val;
  1585.       } else {
  1586.         kp->u.krp.maxactive = KRETACTIVE;
  1587.       }
  1588.       kp->u.krp.handler = &enter_kretprobe_probe;
  1589.       #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
  1590.       if (sdp->entry_probe) {
  1591.         kp->u.krp.entry_handler = &enter_kretprobe_entry_probe;
  1592.         kp->u.krp.data_size = sdp->saved_longs * sizeof(int64_t) +
  1593.                               sdp->saved_strings * MAXSTRINGLEN;
  1594.       }
  1595.       #endif
  1596.       #ifdef __ia64__
  1597.       kp->dummy.addr = kp->u.krp.kp.addr;
  1598.       kp->dummy.pre_handler = NULL;
  1599.       rc = register_kprobe (& kp->dummy);
  1600.       if (rc == 0) {
  1601.         rc = register_kretprobe (& kp->u.krp);
  1602.         if (rc != 0)
  1603.           unregister_kprobe (& kp->dummy);
  1604.       }
  1605.       #else
  1606.       rc = register_kretprobe (& kp->u.krp);
  1607.       #endif
  1608.     } else {
  1609.       kp->u.kp.addr = (void *) relocated_addr;
  1610.       kp->u.kp.pre_handler = &enter_kprobe_probe;
  1611.       #ifdef __ia64__
  1612.       kp->dummy.addr = kp->u.kp.addr;
  1613.       kp->dummy.pre_handler = NULL;
  1614.       rc = register_kprobe (& kp->dummy);
  1615.       if (rc == 0) {
  1616.         rc = register_kprobe (& kp->u.kp);
  1617.         if (rc != 0)
  1618.           unregister_kprobe (& kp->dummy);
  1619.       }
  1620.       #else
  1621.       rc = register_kprobe (& kp->u.kp);
  1622.       #endif
  1623.     }
  1624.     if (rc) {
  1625.       sdp->registered_p = 0;
  1626.       if (!sdp->optional_p)
  1627.         _stp_warn ("probe %s (address 0x%lx) registration error (rc %d)", probe_point, (unsigned long) relocated_addr, rc);
  1628.       rc = 0;
  1629.     }
  1630.     else sdp->registered_p = 1;
  1631.   }
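  /*
   * For reference, each return probe registered by the loop above boils
   * down to the stock kretprobe API -- roughly, as a sketch using this
   * runtime's field names:
   *
   *   kp->u.krp.kp.addr = (void *) relocated_addr;
   *   kp->u.krp.handler = &enter_kretprobe_probe;
   *   rc = register_kretprobe (&kp->u.krp);
   *
   * with an extra "dummy" kprobe registered first on ia64 as an
   * arch-specific workaround in this runtime.  Registration failures for
   * non-optional probes are warned about but deliberately not fatal
   * (rc is reset to 0).
   */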
  1632.   if (rc) {
  1633.     if (probe_point)
  1634.       _stp_error ("probe %s registration error (rc %d)", probe_point, rc);
  1635.     atomic_set (session_state(), STAP_SESSION_ERROR);
  1636.     for (i=0; i<1; i++) {
  1637.       struct stap_be_probe* stp = & stap_be_probes [i];
  1638.       if (stp->type == 1)
  1639.         enter_be_probe (stp);
  1640.     }
  1641.     for (i=0; i<1; i++) {
  1642.       struct stap_be_probe* stp = & stap_be_probes [i];
  1643.       if (stp->type == 2)
  1644.         enter_be_probe (stp);
  1645.     }
  1646.     goto out;
  1647.   }
  1648.   _stp_hrtimer_init();
  1649.   for (i=0; i<1; i++) {
  1650.     struct stap_hrtimer_probe* stp = & stap_hrtimer_probes [i];
  1651.     probe_point = stp->probe->pp;
  1652.     rc = _stp_hrtimer_create(stp, _stp_hrtimer_notify_function);
  1653.     if (rc) {
  1654.       for (j=i-1; j>=0; j--)
  1655.         _stp_hrtimer_cancel(& stap_hrtimer_probes[j]);
  1656.       break;
  1657.     }
  1658.   }
  1659.   if (rc) {
  1660.     if (probe_point)
  1661.       _stp_error ("probe %s registration error (rc %d)", probe_point, rc);
  1662.     atomic_set (session_state(), STAP_SESSION_ERROR);
  1663.     #if defined(STAPCONF_UNREGISTER_KPROBES)
  1664.     j = 0;
  1665.     for (i=0; i<41; i++) {
  1666.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1667.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1668.       if (! sdp->registered_p) continue;
  1669.       if (!sdp->return_p)
  1670.         stap_unreg_kprobes[j++] = &kp->u.kp;
  1671.     }
  1672.     unregister_kprobes((struct kprobe **)stap_unreg_kprobes, j);
  1673.     j = 0;
  1674.     for (i=0; i<41; i++) {
  1675.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1676.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1677.       if (! sdp->registered_p) continue;
  1678.       if (sdp->return_p)
  1679.         stap_unreg_kprobes[j++] = &kp->u.krp;
  1680.     }
  1681.     unregister_kretprobes((struct kretprobe **)stap_unreg_kprobes, j);
  1682.     #ifdef __ia64__
  1683.     j = 0;
  1684.     for (i=0; i<41; i++) {
  1685.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1686.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1687.       if (! sdp->registered_p) continue;
  1688.       stap_unreg_kprobes[j++] = &kp->dummy;
  1689.     }
  1690.     unregister_kprobes((struct kprobe **)stap_unreg_kprobes, j);
  1691.     #endif
  1692.     #endif
  1693.     for (i=0; i<41; i++) {
  1694.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1695.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1696.       if (! sdp->registered_p) continue;
  1697.       if (sdp->return_p) {
  1698.       #if !defined(STAPCONF_UNREGISTER_KPROBES)
  1699.         unregister_kretprobe (&kp->u.krp);
  1700.         #endif
  1701.         atomic_add (kp->u.krp.nmissed, skipped_count());
  1702.         #ifdef STP_TIMING
  1703.         if (kp->u.krp.nmissed)
  1704.           _stp_warn ("Skipped due to missed kretprobe/1 on '%s': %d\n", sdp->probe->pp, kp->u.krp.nmissed);
  1705.         #endif
  1706.         atomic_add (kp->u.krp.kp.nmissed, skipped_count());
  1707.         #ifdef STP_TIMING
  1708.         if (kp->u.krp.kp.nmissed)
  1709.           _stp_warn ("Skipped due to missed kretprobe/2 on '%s': %lu\n", sdp->probe->pp, kp->u.krp.kp.nmissed);
  1710.         #endif
  1711.       } else {
  1712.       #if !defined(STAPCONF_UNREGISTER_KPROBES)
  1713.         unregister_kprobe (&kp->u.kp);
  1714.         #endif
  1715.         atomic_add (kp->u.kp.nmissed, skipped_count());
  1716.         #ifdef STP_TIMING
  1717.         if (kp->u.kp.nmissed)
  1718.           _stp_warn ("Skipped due to missed kprobe on '%s': %lu\n", sdp->probe->pp, kp->u.kp.nmissed);
  1719.         #endif
  1720.       }
  1721.       #if !defined(STAPCONF_UNREGISTER_KPROBES) && defined(__ia64__)
  1722.       unregister_kprobe (&kp->dummy);
  1723.       #endif
  1724.       sdp->registered_p = 0;
  1725.     }
  1726.     for (i=0; i<1; i++) {
  1727.       struct stap_be_probe* stp = & stap_be_probes [i];
  1728.       if (stp->type == 1)
  1729.         enter_be_probe (stp);
  1730.     }
  1731.     for (i=0; i<1; i++) {
  1732.       struct stap_be_probe* stp = & stap_be_probes [i];
  1733.       if (stp->type == 2)
  1734.         enter_be_probe (stp);
  1735.     }
  1736.     goto out;
  1737.   }
  1738.  
  1739.   /* ---- vma tracker ---- */
  1740.   rc = _stp_vma_init();
  1741.  
  1742.   /* ---- task finder ---- */
  1743.   if (rc == 0) {
  1744.     rc = stap_start_task_finder();
  1745.     if (rc) {
  1746.       stap_stop_task_finder();
  1747.     }
  1748.   }
  1749.   if (rc) {
  1750.     if (probe_point)
  1751.       _stp_error ("probe %s registration error (rc %d)", probe_point, rc);
  1752.     atomic_set (session_state(), STAP_SESSION_ERROR);
  1753.     for (i=0; i<1; i++)
  1754.       _stp_hrtimer_cancel(& stap_hrtimer_probes[i]);
  1755.     #if defined(STAPCONF_UNREGISTER_KPROBES)
  1756.     j = 0;
  1757.     for (i=0; i<41; i++) {
  1758.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1759.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1760.       if (! sdp->registered_p) continue;
  1761.       if (!sdp->return_p)
  1762.         stap_unreg_kprobes[j++] = &kp->u.kp;
  1763.     }
  1764.     unregister_kprobes((struct kprobe **)stap_unreg_kprobes, j);
  1765.     j = 0;
  1766.     for (i=0; i<41; i++) {
  1767.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1768.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1769.       if (! sdp->registered_p) continue;
  1770.       if (sdp->return_p)
  1771.         stap_unreg_kprobes[j++] = &kp->u.krp;
  1772.     }
  1773.     unregister_kretprobes((struct kretprobe **)stap_unreg_kprobes, j);
  1774.     #ifdef __ia64__
  1775.     j = 0;
  1776.     for (i=0; i<41; i++) {
  1777.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1778.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1779.       if (! sdp->registered_p) continue;
  1780.       stap_unreg_kprobes[j++] = &kp->dummy;
  1781.     }
  1782.     unregister_kprobes((struct kprobe **)stap_unreg_kprobes, j);
  1783.     #endif
  1784.     #endif
  1785.     for (i=0; i<41; i++) {
  1786.       struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1787.       struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1788.       if (! sdp->registered_p) continue;
  1789.       if (sdp->return_p) {
  1790.       #if !defined(STAPCONF_UNREGISTER_KPROBES)
  1791.         unregister_kretprobe (&kp->u.krp);
  1792.         #endif
  1793.         atomic_add (kp->u.krp.nmissed, skipped_count());
  1794.         #ifdef STP_TIMING
  1795.         if (kp->u.krp.nmissed)
  1796.           _stp_warn ("Skipped due to missed kretprobe/1 on '%s': %d\n", sdp->probe->pp, kp->u.krp.nmissed);
  1797.         #endif
  1798.         atomic_add (kp->u.krp.kp.nmissed, skipped_count());
  1799.         #ifdef STP_TIMING
  1800.         if (kp->u.krp.kp.nmissed)
  1801.           _stp_warn ("Skipped due to missed kretprobe/2 on '%s': %lu\n", sdp->probe->pp, kp->u.krp.kp.nmissed);
  1802.         #endif
  1803.       } else {
  1804.       #if !defined(STAPCONF_UNREGISTER_KPROBES)
  1805.         unregister_kprobe (&kp->u.kp);
  1806.         #endif
  1807.         atomic_add (kp->u.kp.nmissed, skipped_count());
  1808.         #ifdef STP_TIMING
  1809.         if (kp->u.kp.nmissed)
  1810.           _stp_warn ("Skipped due to missed kprobe on '%s': %lu\n", sdp->probe->pp, kp->u.kp.nmissed);
  1811.         #endif
  1812.       }
  1813.       #if !defined(STAPCONF_UNREGISTER_KPROBES) && defined(__ia64__)
  1814.       unregister_kprobe (&kp->dummy);
  1815.       #endif
  1816.       sdp->registered_p = 0;
  1817.     }
  1818.     for (i=0; i<1; i++) {
  1819.       struct stap_be_probe* stp = & stap_be_probes [i];
  1820.       if (stp->type == 1)
  1821.         enter_be_probe (stp);
  1822.     }
  1823.     for (i=0; i<1; i++) {
  1824.       struct stap_be_probe* stp = & stap_be_probes [i];
  1825.       if (stp->type == 2)
  1826.         enter_be_probe (stp);
  1827.     }
  1828.     goto out;
  1829.   }
  1830.   if (atomic_read (session_state()) == STAP_SESSION_STARTING)
  1831.     atomic_set (session_state(), STAP_SESSION_RUNNING);
  1832.   /* ---- task finder ---- */
  1833.   stap_task_finder_post_init();
  1834.   return 0;
  1835. out:
  1836.   _stp_map_del (global(s_called));
  1837.  
  1838.   atomic_set (session_state(), STAP_SESSION_STOPPED);
  1839.   #ifdef STAPCONF_SYNCHRONIZE_SCHED
  1840.   synchronize_sched();
  1841.   #endif
  1842.   #ifdef STAP_NEED_GETTIMEOFDAY
  1843.    _stp_kill_time();
  1844.   #endif
  1845.   _stp_runtime_contexts_free();
  1846.   return rc;
  1847. }
  1848.  
  1849.  
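/*
 * Called when the set of loaded kernel modules changes: probes whose
 * target address can now be relocated get their kprobe/kretprobe
 * registered, and probes whose target has disappeared
 * (relocated_addr == 0) are unregistered, with their missed-hit counts
 * folded into skipped_count().
 */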
  1850. static void systemtap_module_refresh (void) {
  1851.   int i=0, j=0;
  1852.   (void) i;
  1853.   (void) j;
  1854.   for (i=0; i<41; i++) {
  1855.     struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1856.     struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1857.     unsigned long relocated_addr = _stp_kmodule_relocate (sdp->module, sdp->section, sdp->address);
  1858.     int rc;
  1859.     if (sdp->registered_p == 0 && relocated_addr != 0) {
  1860.       if (sdp->return_p) {
  1861.         kp->u.krp.kp.addr = (void *) relocated_addr;
  1862.         if (sdp->maxactive_p) {
  1863.           kp->u.krp.maxactive = sdp->maxactive_val;
  1864.         } else {
  1865.           kp->u.krp.maxactive = KRETACTIVE;
  1866.         }
  1867.         kp->u.krp.handler = &enter_kretprobe_probe;
  1868.         #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
  1869.         if (sdp->entry_probe) {
  1870.           kp->u.krp.entry_handler = &enter_kretprobe_entry_probe;
  1871.           kp->u.krp.data_size = sdp->saved_longs * sizeof(int64_t) +
  1872.                                 sdp->saved_strings * MAXSTRINGLEN;
  1873.         }
  1874.         #endif
  1875.         #ifdef __ia64__
  1876.         kp->dummy.addr = kp->u.krp.kp.addr;
  1877.         kp->dummy.pre_handler = NULL;
  1878.         rc = register_kprobe (& kp->dummy);
  1879.         if (rc == 0) {
  1880.           rc = register_kretprobe (& kp->u.krp);
  1881.           if (rc != 0)
  1882.             unregister_kprobe (& kp->dummy);
  1883.         }
  1884.         #else
  1885.         rc = register_kretprobe (& kp->u.krp);
  1886.         #endif
  1887.       } else {
  1888.         kp->u.kp.addr = (void *) relocated_addr;
  1889.         kp->u.kp.pre_handler = &enter_kprobe_probe;
  1890.         #ifdef __ia64__
  1891.         kp->dummy.addr = kp->u.kp.addr;
  1892.         kp->dummy.pre_handler = NULL;
  1893.         rc = register_kprobe (& kp->dummy);
  1894.         if (rc == 0) {
  1895.           rc = register_kprobe (& kp->u.kp);
  1896.           if (rc != 0)
  1897.             unregister_kprobe (& kp->dummy);
  1898.         }
  1899.         #else
  1900.         rc = register_kprobe (& kp->u.kp);
  1901.         #endif
  1902.       }
  1903.       if (rc == 0) sdp->registered_p = 1;
  1904.     } else if (sdp->registered_p == 1 && relocated_addr == 0) {
  1905.       if (sdp->return_p) {
  1906.         unregister_kretprobe (&kp->u.krp);
  1907.         atomic_add (kp->u.krp.nmissed, skipped_count());
  1908.         #ifdef STP_TIMING
  1909.         if (kp->u.krp.nmissed)
  1910.           _stp_warn ("Skipped due to missed kretprobe/1 on '%s': %d\n", sdp->probe->pp, kp->u.krp.nmissed);
  1911.         #endif
  1912.         atomic_add (kp->u.krp.kp.nmissed, skipped_count());
  1913.         #ifdef STP_TIMING
  1914.         if (kp->u.krp.kp.nmissed)
  1915.           _stp_warn ("Skipped due to missed kretprobe/2 on '%s': %lu\n", sdp->probe->pp, kp->u.krp.kp.nmissed);
  1916.         #endif
  1917.       } else {
  1918.         unregister_kprobe (&kp->u.kp);
  1919.         atomic_add (kp->u.kp.nmissed, skipped_count());
  1920.         #ifdef STP_TIMING
  1921.         if (kp->u.kp.nmissed)
  1922.           _stp_warn ("Skipped due to missed kprobe on '%s': %lu\n", sdp->probe->pp, kp->u.kp.nmissed);
  1923.         #endif
  1924.       }
  1925.       #if defined(__ia64__)
  1926.       unregister_kprobe (&kp->dummy);
  1927.       #endif
  1928.       sdp->registered_p = 0;
  1929.     }
  1930.   }
  1931. }
  1932.  
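/*
 * Orderly shutdown: stop the task finder and vma tracker, cancel the
 * hrtimer, unregister all kprobes/kretprobes (in bulk where
 * STAPCONF_UNREGISTER_KPROBES is available), run the type-1 (end) and
 * type-2 (error) probes, wait for in-flight handlers to drain, free the
 * 'called' map and the contexts, and finally print the per-probe
 * hit/timing report and the skip/error summary.
 */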
  1933.  
  1934. static void systemtap_module_exit (void) {
  1935.   int i=0, j=0;
  1936.   (void) i;
  1937.   (void) j;
  1938.   if (atomic_read (session_state()) == STAP_SESSION_STARTING)
  1939.     return;
  1940.   if (atomic_read (session_state()) == STAP_SESSION_RUNNING)
  1941.     atomic_set (session_state(), STAP_SESSION_STOPPING);
  1942.  
  1943.   /* ---- task finder ---- */
  1944.   stap_stop_task_finder();
  1945.  
  1946.   /* ---- vma tracker ---- */
  1947.   _stp_vma_done();
  1948.   for (i=0; i<1; i++)
  1949.     _stp_hrtimer_cancel(& stap_hrtimer_probes[i]);
  1950.   #if defined(STAPCONF_UNREGISTER_KPROBES)
  1951.   j = 0;
  1952.   for (i=0; i<41; i++) {
  1953.     struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1954.     struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1955.     if (! sdp->registered_p) continue;
  1956.     if (!sdp->return_p)
  1957.       stap_unreg_kprobes[j++] = &kp->u.kp;
  1958.   }
  1959.   unregister_kprobes((struct kprobe **)stap_unreg_kprobes, j);
  1960.   j = 0;
  1961.   for (i=0; i<41; i++) {
  1962.     struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1963.     struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1964.     if (! sdp->registered_p) continue;
  1965.     if (sdp->return_p)
  1966.       stap_unreg_kprobes[j++] = &kp->u.krp;
  1967.   }
  1968.   unregister_kretprobes((struct kretprobe **)stap_unreg_kprobes, j);
  1969.   #ifdef __ia64__
  1970.   j = 0;
  1971.   for (i=0; i<41; i++) {
  1972.     struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1973.     struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1974.     if (! sdp->registered_p) continue;
  1975.     stap_unreg_kprobes[j++] = &kp->dummy;
  1976.   }
  1977.   unregister_kprobes((struct kprobe **)stap_unreg_kprobes, j);
  1978.   #endif
  1979.   #endif
  1980.   for (i=0; i<41; i++) {
  1981.     struct stap_dwarf_probe *sdp = & stap_dwarf_probes[i];
  1982.     struct stap_dwarf_kprobe *kp = & stap_dwarf_kprobes[i];
  1983.     if (! sdp->registered_p) continue;
  1984.     if (sdp->return_p) {
  1985.     #if !defined(STAPCONF_UNREGISTER_KPROBES)
  1986.       unregister_kretprobe (&kp->u.krp);
  1987.       #endif
  1988.       atomic_add (kp->u.krp.nmissed, skipped_count());
  1989.       #ifdef STP_TIMING
  1990.       if (kp->u.krp.nmissed)
  1991.         _stp_warn ("Skipped due to missed kretprobe/1 on '%s': %d\n", sdp->probe->pp, kp->u.krp.nmissed);
  1992.       #endif
  1993.       atomic_add (kp->u.krp.kp.nmissed, skipped_count());
  1994.       #ifdef STP_TIMING
  1995.       if (kp->u.krp.kp.nmissed)
  1996.         _stp_warn ("Skipped due to missed kretprobe/2 on '%s': %lu\n", sdp->probe->pp, kp->u.krp.kp.nmissed);
  1997.       #endif
  1998.     } else {
  1999.     #if !defined(STAPCONF_UNREGISTER_KPROBES)
  2000.       unregister_kprobe (&kp->u.kp);
  2001.       #endif
  2002.       atomic_add (kp->u.kp.nmissed, skipped_count());
  2003.       #ifdef STP_TIMING
  2004.       if (kp->u.kp.nmissed)
  2005.         _stp_warn ("Skipped due to missed kprobe on '%s': %lu\n", sdp->probe->pp, kp->u.kp.nmissed);
  2006.       #endif
  2007.     }
  2008.     #if !defined(STAPCONF_UNREGISTER_KPROBES) && defined(__ia64__)
  2009.     unregister_kprobe (&kp->dummy);
  2010.     #endif
  2011.     sdp->registered_p = 0;
  2012.   }
  2013.   for (i=0; i<1; i++) {
  2014.     struct stap_be_probe* stp = & stap_be_probes [i];
  2015.     if (stp->type == 1)
  2016.       enter_be_probe (stp);
  2017.   }
  2018.   for (i=0; i<1; i++) {
  2019.     struct stap_be_probe* stp = & stap_be_probes [i];
  2020.     if (stp->type == 2)
  2021.       enter_be_probe (stp);
  2022.   }
  2023.   #ifdef STAPCONF_SYNCHRONIZE_SCHED
  2024.   synchronize_sched();
  2025.   #endif
  2026.   _stp_runtime_context_wait();
  2027.   atomic_set (session_state(), STAP_SESSION_STOPPED);
  2028.   #ifdef STAPCONF_SYNCHRONIZE_SCHED
  2029.   synchronize_sched();
  2030.   #endif
  2031.   _stp_map_del (global(s_called));
  2032.  
  2033.   _stp_runtime_contexts_free();
  2034.   #ifdef STAP_NEED_GETTIMEOFDAY
  2035.    _stp_kill_time();
  2036.   #endif
  2037.   preempt_disable();
  2038.   #if defined(STP_TIMING) || defined(STP_ALIBI)
  2039.   _stp_printf("----- probe hit report: \n");
  2040.   for (i = 0; i < ARRAY_SIZE(stap_probes); ++i) {
  2041.     const struct stap_probe *const p = &stap_probes[i];
  2042.     #ifdef STP_ALIBI
  2043.     int alibi = atomic_read(probe_alibi(i));
  2044.     if (alibi)
  2045.       _stp_printf ("%s, (%s), hits: %d,%s, index: %d\n",
  2046.           p->pp, p->location, alibi, p->derivation, i);
  2047.     #endif
  2048.     #ifdef STP_TIMING
  2049.     if (likely (probe_timing(i))) {
  2050.       struct stat_data *stats = _stp_stat_get (probe_timing(i), 0);
  2051.       if (stats->count) {
  2052.         int64_t avg = _stp_div64 (NULL, stats->sum, stats->count);
  2053.         _stp_printf ("%s, (%s), hits: %lld, cycles: %lldmin/%lldavg/%lldmax,%s, index: %d\n",
  2054.             p->pp, p->location, (long long) stats->count,
  2055.             (long long) stats->min, (long long) avg, (long long) stats->max,
  2056.             p->derivation, i);
  2057.       }
  2058.       _stp_stat_del (probe_timing(i));
  2059.     }
  2060.     #endif
  2061.   }
  2062.   _stp_print_flush();
  2063.   #endif
  2064.   if (atomic_read (skipped_count()) || atomic_read (error_count()) || atomic_read (skipped_count_reentrant())) {
  2065.     _stp_warn ("Number of errors: %d, skipped probes: %d\n", (int) atomic_read (error_count()), (int) atomic_read (skipped_count()));
  2066.     #ifdef STP_TIMING
  2067.     {
  2068.       int ctr;
  2069.       ctr = atomic_read (global_skipped(s_called));
  2070.       if (ctr) _stp_warn ("Skipped due to global '%s' lock timeout: %d\n", "called", ctr);
  2071.       ctr = atomic_read (global_skipped(s_num_polls));
  2072.       if (ctr) _stp_warn ("Skipped due to global '%s' lock timeout: %d\n", "num_polls", ctr);
  2073.       ctr = atomic_read (skipped_count_lowstack());
  2074.       if (ctr) _stp_warn ("Skipped due to low stack: %d\n", ctr);
  2075.       ctr = atomic_read (skipped_count_reentrant());
  2076.       if (ctr) _stp_warn ("Skipped due to reentrancy: %d\n", ctr);
  2077.       ctr = atomic_read (skipped_count_uprobe_reg());
  2078.       if (ctr) _stp_warn ("Skipped due to uprobe register failure: %d\n", ctr);
  2079.       ctr = atomic_read (skipped_count_uprobe_unreg());
  2080.       if (ctr) _stp_warn ("Skipped due to uprobe unregister failure: %d\n", ctr);
  2081.     }
  2082.     #endif
  2083.     _stp_print_flush();
  2084.   }
  2085.   preempt_enable_no_resched();
  2086. }
  2087.  
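/*
 * Hooks called from the runtime's kernel-module init/exit glue.  None of
 * the conditional setup steps are active for this script, so rc never
 * becomes nonzero and both functions are effectively no-ops here.
 */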
  2088.  
  2089. static int systemtap_kernel_module_init (void) {
  2090.   int rc = 0;
  2091.   int i=0, j=0;
  2092.   if (rc) {
  2093.     goto out;
  2094.   }
  2095.   if (rc) {
  2096.     goto out;
  2097.   }
  2098.   if (rc) {
  2099.     goto out;
  2100.   }
  2101.   if (rc) {
  2102.     goto out;
  2103.   }
  2104. out:
  2105.   return rc;
  2106. }
  2107.  
  2108.  
  2109. static void systemtap_kernel_module_exit (void) {
  2110.   int i=0, j=0;
  2111. }
  2112.  
  2113.  
  2114. #include "stap-symbols.h"
  2115. MODULE_DESCRIPTION("systemtap-generated probe");
  2116. MODULE_LICENSE("GPL");
  2117. MODULE_INFO(intree,"Y");
  2118.  
  2119. #undef called
  2120.  
  2121. #undef num_polls
  2122. module_param_named (num_polls, global(s_num_polls), int64_t, 0);
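/*
 * Being a module parameter, 'num_polls' can be set from outside the
 * module at load time -- presumably by passing num_polls=<value> as a
 * module option through staprun, or via stap's -G option when starting
 * the script.
 */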
  2123. root@linaro-developer:~/systemtap-2.2.1/testsuite#