[eamon@eamon-archpc starlte-endurance]$ diff -uNr --no-dereference --color=always kernel/sched/cpufreq_schedutil.c kernel/sched/cpufreq_blu_schedutil.c
--- kernel/sched/cpufreq_schedutil.c 2018-12-14 10:57:31.879667167 +1100
+++ kernel/sched/cpufreq_blu_schedutil.c 2018-12-14 17:15:00.921755191 +1100
@@ -4,6 +4,9 @@
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
+ * Copyright (C) 2018, eng.stk
+ * changes for blu_schedutil: eng.stk <eng.stk@sapo.pt>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -12,11 +15,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
+#include <linux/kthread.h>
#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/cpu_pm.h>
-
-#include <trace/events/power.h>

#include "sched.h"
#include "tune.h"
@@ -29,6 +29,7 @@
#define cpufreq_enable_fast_switch(x)
#define cpufreq_disable_fast_switch(x)
#define LATENCY_MULTIPLIER (1000)
+#define SUGOV_KTHREAD_PRIORITY 50

struct sugov_tunables {
struct gov_attr_set attr_set;
@@ -52,8 +53,10 @@

/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
- struct work_struct work;
+ struct kthread_work work;
struct mutex work_lock;
+ struct kthread_worker worker;
+ struct task_struct *thread;
bool work_in_progress;

bool need_freq_update;
@@ -80,24 +83,6 @@

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

-/******************* exynos specific function *******************/
-#define DEFAULT_EXPIRED_TIME 70
-struct sugov_exynos {
- /* for slack timer */
- unsigned long min;
- int enabled;
- bool started;
- int expired_time;
- struct timer_list timer;
-
- /* pm_qos_class */
- int qos_min_class;
-};
-static DEFINE_PER_CPU(struct sugov_exynos, sugov_exynos);
-static void sugov_stop_slack(int cpu);
-static void sugov_start_slack(int cpu);
-static void sugov_update_min(struct cpufreq_policy *policy);
-
/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
@@ -141,34 +126,10 @@
return false;
}

-static int sugov_select_scaling_cpu(void)
-{
- int cpu;
- cpumask_t mask;
-
- cpumask_clear(&mask);
- cpumask_and(&mask, cpu_coregroup_mask(0), cpu_online_mask);
-
- /* Idle core of the boot cluster is selected to scaling cpu */
- for_each_cpu(cpu, &mask)
- if (idle_cpu(cpu))
- return cpu;
-
- /* if panic_cpu is not Little core, mask will be empty */
- if (unlikely(!cpumask_weight(&mask))) {
- cpu = atomic_read(&panic_cpu);
- if (cpu != PANIC_CPU_INVALID)
- return cpu;
- }
-
- return cpumask_weight(&mask) - 1;
-}
-
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
struct cpufreq_policy *policy = sg_policy->policy;
- int cpu;

if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
/* Reset cached freq as next_freq isn't changed */
@@ -188,29 +149,15 @@
return;

policy->cur = next_freq;
- trace_cpu_frequency(next_freq, smp_processor_id());
} else {
- cpu = sugov_select_scaling_cpu();
- if (cpu < 0)
- return;
-
sg_policy->work_in_progress = true;
- irq_work_queue_on(&sg_policy->irq_work, cpu);
+ irq_work_queue(&sg_policy->irq_work);
}
}

-#ifdef CONFIG_FREQVAR_TUNE
-unsigned int freqvar_tipping_point(int cpu, unsigned int freq);
-#else
-static inline unsigned int freqvar_tipping_point(int cpu, unsigned int freq)
-{
- return freq + (freq >> 2);
-}
-#endif
-
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_policy: schedutil policy object to compute the new frequency for.
+ * @sg_policy: blu_schedutil policy object to compute the new frequency for.
* @util: Current CPU utilization.
* @max: CPU capacity.
*
@@ -235,9 +182,9 @@
{
struct cpufreq_policy *policy = sg_policy->policy;
unsigned int freq = arch_scale_freq_invariant() ?
- policy->max : policy->cur;
+ policy->cpuinfo.max_freq : policy->cur;

- freq = freqvar_tipping_point(policy->cpu, freq) * util / max;
+ freq = (freq + (freq >> 2)) * util / max;

if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
return sg_policy->next_freq;
@@ -254,27 +201,25 @@
#endif
}

-extern unsigned int sched_rt_remove_ratio_for_freq;
-
static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)
{
int cpu = smp_processor_id();
- unsigned long max_cap;
- unsigned long rt_avg = cpu_rq(cpu)->rt.avg.util_avg;
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long max_cap, rt;
+ s64 delta;

max_cap = arch_scale_cpu_capacity(NULL, cpu);

+ sched_avg_update(rq);
+ delta = time - rq->age_stamp;
+ if (unlikely(delta < 0))
+ delta = 0;
+ rt = div64_u64(rq->rt_avg, sched_avg_period() + delta);
+ rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT;
+
*util = boosted_cpu_util(cpu);
-
- if (sched_feat(UTIL_EST)) {
- *util = max_t(unsigned long, *util,
- READ_ONCE(cpu_rq(cpu)->cfs.avg.util_est.enqueued));
- }
-
- if (sched_rt_remove_ratio_for_freq)
- *util -= ((rt_avg * sched_rt_remove_ratio_for_freq) / 100);
if (likely(use_pelt()))
- *util = min(*util, max_cap);
+ *util = min((*util + rt), max_cap);

*max = max_cap;
}
@@ -291,9 +236,6 @@
if (delta_ns > TICK_NSEC)
sg_cpu->iowait_boost = 0;
}
-
- /* HACK: block iowait boost to avoid unnecessary setting max frequency */
- sg_cpu->iowait_boost = 0;
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
@@ -370,7 +312,7 @@
unsigned long util = 0, max = 1;
unsigned int j;

- for_each_cpu_and(j, policy->related_cpus, cpu_online_mask) {
+ for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
unsigned long j_util, j_max;
s64 delta_ns;
@@ -434,7 +376,7 @@
raw_spin_unlock(&sg_policy->update_lock);
}

-static void sugov_work(struct work_struct *work)
+static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

@@ -451,55 +393,21 @@
struct sugov_policy *sg_policy;

sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
- schedule_work_on(smp_processor_id(), &sg_policy->work);
-}
-
-/************************ Governor externals ***********************/
-static void update_min_rate_limit_us(struct sugov_policy *sg_policy);
-void sugov_update_rate_limit_us(struct cpufreq_policy *policy,
- int up_rate_limit_ms, int down_rate_limit_ms)
-{
- struct sugov_policy *sg_policy;
- struct sugov_tunables *tunables;
-
- sg_policy = policy->governor_data;
- if (!sg_policy)
- return;
-
- tunables = sg_policy->tunables;
- if (!tunables)
- return;
-
- tunables->up_rate_limit_us = (unsigned int)(up_rate_limit_ms * USEC_PER_MSEC);
- tunables->down_rate_limit_us = (unsigned int)(down_rate_limit_ms * USEC_PER_MSEC);
-
- sg_policy->up_rate_delay_ns = up_rate_limit_ms * NSEC_PER_MSEC;
- sg_policy->down_rate_delay_ns = down_rate_limit_ms * NSEC_PER_MSEC;
-
- update_min_rate_limit_us(sg_policy);
-}

-int sugov_sysfs_add_attr(struct cpufreq_policy *policy, const struct attribute *attr)
-{
- struct sugov_policy *sg_policy;
- struct sugov_tunables *tunables;
-
- sg_policy = policy->governor_data;
- if (!sg_policy)
- return -ENODEV;
-
- tunables = sg_policy->tunables;
- if (!tunables)
- return -ENODEV;
-
- return sysfs_create_file(&tunables->attr_set.kobj, attr);
-}
-
-struct cpufreq_policy *sugov_get_attr_policy(struct gov_attr_set *attr_set)
-{
- struct sugov_policy *sg_policy = list_first_entry(&attr_set->policy_list,
- typeof(*sg_policy), tunables_hook);
- return sg_policy->policy;
+ /*
+ * For RT and deadline tasks, the blu_schedutil governor shoots the
+ * frequency to maximum. Special care must be taken to ensure that this
+ * kthread doesn't result in the same behavior.
+ *
+ * This is (mostly) guaranteed by the work_in_progress flag. The flag is
+ * updated only at the end of the sugov_work() function and before that
+ * the blu_schedutil governor rejects all other frequency scaling requests.
+ *
+ * There is a very rare case though, where the RT thread yields right
+ * after the work_in_progress flag is cleared. The effects of that are
+ * neglected for now.
+ */
+ kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/
@@ -592,7 +500,7 @@

/********************** cpufreq governor interface *********************/

-static struct cpufreq_governor schedutil_gov;
+static struct cpufreq_governor blu_schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
@@ -603,19 +511,64 @@
return NULL;

sg_policy->policy = policy;
- init_irq_work(&sg_policy->irq_work, sugov_irq_work);
- INIT_WORK(&sg_policy->work, sugov_work);
- mutex_init(&sg_policy->work_lock);
raw_spin_lock_init(&sg_policy->update_lock);
return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
- mutex_destroy(&sg_policy->work_lock);
kfree(sg_policy);
}

+static int sugov_kthread_create(struct sugov_policy *sg_policy)
+{
+ struct task_struct *thread;
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+ struct cpufreq_policy *policy = sg_policy->policy;
+ int ret;
+
+ /* kthread only required for slow path */
+ if (policy->fast_switch_enabled)
+ return 0;
+
+ kthread_init_work(&sg_policy->work, sugov_work);
+ kthread_init_worker(&sg_policy->worker);
+ thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
+ "sugov:%d",
+ cpumask_first(policy->related_cpus));
+ if (IS_ERR(thread)) {
+ pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
+ return PTR_ERR(thread);
+ }
+
+ ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+ if (ret) {
+ kthread_stop(thread);
+ pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ return ret;
+ }
+
+ sg_policy->thread = thread;
+ kthread_bind_mask(thread, policy->related_cpus);
+ init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+ mutex_init(&sg_policy->work_lock);
+
+ wake_up_process(thread);
+
+ return 0;
+}
+
+static void sugov_kthread_stop(struct sugov_policy *sg_policy)
+{
+ /* kthread only required for slow path */
+ if (sg_policy->policy->fast_switch_enabled)
+ return;
+
+ kthread_flush_worker(&sg_policy->worker);
+ kthread_stop(sg_policy->thread);
+ mutex_destroy(&sg_policy->work_lock);
+}
+
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
struct sugov_tunables *tunables;
@@ -641,6 +594,7 @@
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
+ unsigned int lat;
int ret = 0;

/* State should be equivalent to EXIT */
@@ -655,12 +609,16 @@
goto disable_fast_switch;
}

+ ret = sugov_kthread_create(sg_policy);
+ if (ret)
+ goto free_sg_policy;
+
mutex_lock(&global_tunables_lock);

if (global_tunables) {
if (WARN_ON(have_governor_per_policy())) {
ret = -EINVAL;
- goto free_sg_policy;
+ goto stop_kthread;
}
policy->governor_data = sg_policy;
sg_policy->tunables = global_tunables;
@@ -672,22 +630,15 @@
tunables = sugov_tunables_alloc(sg_policy);
if (!tunables) {
ret = -ENOMEM;
- goto free_sg_policy;
+ goto stop_kthread;
}

- if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
- tunables->up_rate_limit_us = policy->up_transition_delay_us;
- tunables->down_rate_limit_us = policy->down_transition_delay_us;
- } else {
- unsigned int lat;
-
- tunables->up_rate_limit_us = UP_LATENCY_MULTIPLIER;
- tunables->down_rate_limit_us = DOWN_LATENCY_MULTIPLIER;
- lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (lat) {
- tunables->up_rate_limit_us *= lat;
- tunables->down_rate_limit_us *= lat;
- }
+ tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat) {
+ tunables->up_rate_limit_us *= lat;
+ tunables->down_rate_limit_us *= lat;
}

policy->governor_data = sg_policy;
@@ -695,7 +646,7 @@

ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
get_governor_parent_kobj(policy), "%s",
- schedutil_gov.name);
+ blu_schedutil_gov.name);
if (ret)
goto fail;

@@ -707,6 +658,9 @@
policy->governor_data = NULL;
sugov_tunables_free(tunables);

+ stop_kthread:
+ sugov_kthread_stop(sg_policy);
+
free_sg_policy:
mutex_unlock(&global_tunables_lock);

@@ -732,43 +686,13 @@
if (!count)
sugov_tunables_free(tunables);

- sugov_policy_free(sg_policy);
mutex_unlock(&global_tunables_lock);
-}
-
-#ifdef CONFIG_EXYNOS_HOTPLUG_GOVERNOR
-int sugov_fast_start(struct cpufreq_policy *policy, unsigned int cpu)
-{
- struct sugov_policy *sg_policy;
- struct sugov_cpu *sg_cpu;
-
- down_write(&policy->rwsem);
- cpumask_set_cpu(cpu, policy->cpus);

- sg_policy = policy->governor_data;
- sg_cpu = &per_cpu(sugov_cpu, cpu);
-
- memset(sg_cpu, 0, sizeof(*sg_cpu));
- sg_cpu->sg_policy = sg_policy;
- sg_cpu->util = 0;
- sg_cpu->max = 0;
- sg_cpu->flags = 0;
- sg_cpu->last_update = 0;
- sg_cpu->iowait_boost = 0;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_shared);
-
- up_write(&policy->rwsem);
+ sugov_kthread_stop(sg_policy);
+ sugov_policy_free(sg_policy);

- return 1;
-}
-#else
-int sugov_fast_start(struct cpufreq_policy *policy, unsigned int cpu)
-{
- return 0;
+ cpufreq_disable_fast_switch(policy);
}
-#endif

static int sugov_start(struct cpufreq_policy *policy)
{
@@ -791,15 +715,13 @@

memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->sg_policy = sg_policy;
- sg_cpu->flags = 0;
- sugov_start_slack(cpu);
+ sg_cpu->flags = SCHED_CPUFREQ_DL;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
policy_is_shared(policy) ?
sugov_update_shared :
sugov_update_single);
}
-
return 0;
}

@@ -808,43 +730,32 @@
struct sugov_policy *sg_policy = policy->governor_data;
unsigned int cpu;

- for_each_cpu(cpu, policy->cpus) {
- sugov_stop_slack(cpu);
+ for_each_cpu(cpu, policy->cpus)
cpufreq_remove_update_util_hook(cpu);
- }

synchronize_sched();

- irq_work_sync(&sg_policy->irq_work);
- cancel_work_sync(&sg_policy->work);
+ if (!policy->fast_switch_enabled) {
+ irq_work_sync(&sg_policy->irq_work);
+ kthread_cancel_work_sync(&sg_policy->work);
+ }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;

- mutex_lock(&global_tunables_lock);
-
- if (!sg_policy) {
- mutex_unlock(&global_tunables_lock);
- return;
- }
-
if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
}

- sugov_update_min(policy);
-
sg_policy->need_freq_update = true;
-
- mutex_unlock(&global_tunables_lock);
}

-static struct cpufreq_governor schedutil_gov = {
- .name = "schedutil",
+static struct cpufreq_governor blu_schedutil_gov = {
+ .name = "blu_schedutil",
.owner = THIS_MODULE,
.init = sugov_init,
.exit = sugov_exit,
@@ -853,244 +764,15 @@
.limits = sugov_limits,
};

-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_BLU_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
- return &schedutil_gov;
+ return &blu_schedutil_gov;
}
#endif
-static void sugov_update_min(struct cpufreq_policy *policy)
-{
- int cpu, max_cap;
- struct sugov_exynos *sg_exynos;
- int min_cap;
-
- max_cap = arch_scale_cpu_capacity(NULL, policy->cpu);
-
- /* min_cap is minimum value making higher frequency than policy->min */
- min_cap = max_cap * policy->min / policy->max;
- min_cap = (min_cap * 4 / 5) + 1;
-
- for_each_cpu(cpu, policy->cpus) {
- sg_exynos = &per_cpu(sugov_exynos, cpu);
- sg_exynos->min = min_cap;
- }
-}
-
-static void sugov_nop_timer(unsigned long data)
-{
- /*
- * The purpose of slack-timer is to wake up the CPU from IDLE, in order
- * to decrease its frequency if it is not set to minimum already.
- *
- * This is important for platforms where CPU with higher frequencies
- * consume higher power even at IDLE.
- */
- trace_sugov_slack_func(smp_processor_id());
-}
-
-static void sugov_start_slack(int cpu)
-{
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
-
- if (!sg_exynos->enabled)
- return;
-
- sg_exynos->min = ULONG_MAX;
- sg_exynos->started = true;
-}
-
-static void sugov_stop_slack(int cpu)
-{
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
-
- sg_exynos->started = false;
- if (timer_pending(&sg_exynos->timer))
- del_timer_sync(&sg_exynos->timer);
-}
-
-static s64 get_next_event_time_ms(void)
-{
- return ktime_to_us(tick_nohz_get_sleep_length());
-}
-
-static int sugov_need_slack_timer(unsigned int cpu)
-{
- struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
-
- if (schedtune_cpu_boost(cpu))
- return 0;
-
- if (sg_cpu->util > sg_exynos->min &&
- get_next_event_time_ms() > sg_exynos->expired_time)
- return 1;
-
- return 0;
-}
-
-static int sugov_pm_notifier(struct notifier_block *self,
- unsigned long action, void *v)
-{
- unsigned int cpu = raw_smp_processor_id();
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
- struct timer_list *timer = &sg_exynos->timer;
-
- if (!sg_exynos->started)
- return NOTIFY_OK;
-
- switch (action) {
- case CPU_PM_ENTER_PREPARE:
- if (timer_pending(timer))
- del_timer_sync(timer);
-
- if (sugov_need_slack_timer(cpu)) {
- timer->expires = jiffies + msecs_to_jiffies(sg_exynos->expired_time);
- add_timer_on(timer, cpu);
- trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, 1);
- }
- break;
-
- case CPU_PM_ENTER:
- if (timer_pending(timer) && !sugov_need_slack_timer(cpu)) {
- del_timer_sync(timer);
- trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, -1);
- }
- break;
-
- case CPU_PM_EXIT_POST:
- if (timer_pending(timer) && (time_after(timer->expires, jiffies))) {
- del_timer_sync(timer);
- trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, -1);
- }
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block sugov_pm_nb = {
- .notifier_call = sugov_pm_notifier,
-};
-
-static int find_cpu_pm_qos_class(int pm_qos_class)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
-
- if ((sg_exynos->qos_min_class == pm_qos_class) &&
- cpumask_test_cpu(cpu, cpu_active_mask))
- return cpu;
- }
-
- pr_err("cannot find cpu of PM QoS class\n");
- return -EINVAL;
-}
-
-static int sugov_pm_qos_callback(struct notifier_block *nb,
- unsigned long val, void *v)
-{
- struct sugov_cpu *sg_cpu;
- struct cpufreq_policy *policy;
- int pm_qos_class = *((int *)v);
- unsigned int next_freq;
- int cpu;
-
- cpu = find_cpu_pm_qos_class(pm_qos_class);
- if (cpu < 0)
- return NOTIFY_BAD;
-
- sg_cpu = &per_cpu(sugov_cpu, cpu);
- if (!sg_cpu || !sg_cpu->sg_policy || !sg_cpu->sg_policy->policy)
- return NOTIFY_BAD;
-
- next_freq = sg_cpu->sg_policy->next_freq;
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- return NOTIFY_BAD;
-
- if (val >= policy->cur) {
- cpufreq_cpu_put(policy);
- return NOTIFY_BAD;
- }
-
- __cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
-
- cpufreq_cpu_put(policy);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block sugov_min_qos_notifier = {
- .notifier_call = sugov_pm_qos_callback,
- .priority = INT_MIN,
-};
-
-static int __init sugov_parse_dt(struct device_node *dn, int cpu)
-{
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
-
- /* parsing slack info */
- if (of_property_read_u32(dn, "enabled", &sg_exynos->enabled))
- return -EINVAL;
- if (sg_exynos->enabled)
- if (of_property_read_u32(dn, "expired_time", &sg_exynos->expired_time))
- sg_exynos->expired_time = DEFAULT_EXPIRED_TIME;
-
- /* parsing pm_qos_class info */
- if (of_property_read_u32(dn, "qos_min_class", &sg_exynos->qos_min_class))
- return -EINVAL;
-
- return 0;
-}
-
-static void __init sugov_exynos_init(void)
-{
- int cpu, ret;
- struct device_node *dn = NULL;
- const char *buf;
-
- while ((dn = of_find_node_by_type(dn, "schedutil-domain"))) {
- struct cpumask shared_mask;
- /* Get shared cpus */
- ret = of_property_read_string(dn, "shared-cpus", &buf);
- if (ret)
- goto exit;
-
- cpulist_parse(buf, &shared_mask);
- for_each_cpu(cpu, &shared_mask)
- if (sugov_parse_dt(dn, cpu))
- goto exit;
- }
-
- for_each_possible_cpu(cpu) {
- struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
-
- if (!sg_exynos->enabled)
- continue;
-
- /* Initialize slack-timer */
- init_timer_pinned(&sg_exynos->timer);
- sg_exynos->timer.function = sugov_nop_timer;
- }
-
- pm_qos_add_notifier(PM_QOS_CLUSTER0_FREQ_MIN, &sugov_min_qos_notifier);
- pm_qos_add_notifier(PM_QOS_CLUSTER1_FREQ_MIN, &sugov_min_qos_notifier);
- cpu_pm_register_notifier(&sugov_pm_nb);
-
- return;
-exit:
- pr_info("%s: failed to initialized slack_timer, pm_qos handler\n", __func__);
-}

static int __init sugov_register(void)
{
- sugov_exynos_init();
-
- return cpufreq_register_governor(&schedutil_gov);
+ return cpufreq_register_governor(&blu_schedutil_gov);
}
fs_initcall(sugov_register);
[eamon@eamon-archpc starlte-endurance]$