/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS - CPU frequency scaling support for EXYNOS series
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/pm_qos.h>

#include <asm/cputype.h>
#include <asm/bL_switcher.h>

#include <mach/cpufreq.h>
#include <mach/regs-pmu.h>
#include <mach/tmu.h>
#include <mach/asv-exynos.h>
#include <mach/sec_debug.h>

#include <plat/cpu.h>
struct lpj_info {
	unsigned long ref;
	unsigned int freq;
};

static struct lpj_info global_lpj_ref;

/* Use boot_freq when entering sleep mode */
static unsigned int boot_freq;

/* For the IKS switcher */
static unsigned int freq_min[CA_END] __read_mostly;	/* minimum clock frequency per cluster */
static unsigned int freq_max[CA_END] __read_mostly;	/* maximum clock frequency per cluster */
static struct cpumask cluster_cpus[CA_END];	/* CPU mask of each (big/LITTLE) cluster */
static unsigned long lpj[CA_END];
#define ACTUAL_FREQ(x, cur)	((cur == CA7) ? (x) << 1 : (x))
#define VIRT_FREQ(x, cur)	((cur == CA7) ? (x) >> 1 : (x))
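/*
 * The CA7 (LITTLE) cluster publishes "virtual" frequencies at half its
 * actual clock rate so that both clusters fit in one monotonic merged
 * table. For example, an actual 1.2 GHz CA7 clock is reported as a
 * virtual 600 MHz entry: VIRT_FREQ(1200000, CA7) == 600000 and
 * ACTUAL_FREQ(600000, CA7) == 1200000. CA15 frequencies pass through
 * unchanged.
 */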
/*
 * These thresholds are based on the DMIPS ratio between the A15 and the A7.
 * They are used to adjust the CPU frequency when switching clusters.
 */
#define UP_IKS_THRESH		6
#define DOWN_IKS_THRESH		18
#define DOWN_STEP_OLD		1100000
#define DOWN_STEP_NEW		600000
#define UP_STEP_OLD		550000
#define UP_STEP_NEW		600000
#define STEP_LEVEL_CA7_MAX	600000
#define STEP_LEVEL_CA15_MIN	800000

#define LIMIT_COLD_VOLTAGE	1250000
#define CPU_MAX_COUNT		4
static struct exynos_dvfs_info *exynos_info[CA_END];
static struct exynos_dvfs_info exynos_info_CA7;
static struct exynos_dvfs_info exynos_info_CA15;

static struct cpufreq_frequency_table *merge_freq_table;

static struct regulator *arm_regulator;
static struct regulator *kfc_regulator;
static unsigned int volt_offset;
static int volt_powerdown[CA_END];
static unsigned int policy_max_freq;

static struct cpufreq_freqs *freqs[CA_END];

static unsigned int exynos5410_bb_con0;

static unsigned int user_set_max_freq;
static unsigned int user_set_eagle_count;
static unsigned int all_cpu_freqs[CPU_MAX_COUNT];

static DEFINE_MUTEX(cpufreq_lock);
static DEFINE_MUTEX(cpufreq_scale_lock);

static bool exynos_cpufreq_init_done;

/* Cluster the system booted from */
static cluster_type boot_cluster;

DEFINE_PER_CPU(cluster_type, cpu_cur_cluster);
static DEFINE_PER_CPU(unsigned int, req_freq);

static struct pm_qos_request boot_cpu_qos;
static struct pm_qos_request min_cpu_qos;

static struct cpufreq_policy fake_policy[CA_END][NR_CPUS];
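/*
 * Apply the cold-temperature voltage offset, clamping the result to
 * LIMIT_COLD_VOLTAGE. Voltages already above the limit pass through
 * unchanged.
 */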
static unsigned int get_limit_voltage(unsigned int voltage)
{
	if (voltage > LIMIT_COLD_VOLTAGE)
		return voltage;

	if (voltage + volt_offset > LIMIT_COLD_VOLTAGE)
		return LIMIT_COLD_VOLTAGE;

	return voltage + volt_offset;
}
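/*
 * Mark every possible CPU as belonging to the given boot cluster and
 * record it in that cluster's cpumask.
 */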
static void init_cpumask_cluster_set(unsigned int cluster)
{
	unsigned int i;

	for_each_cpu(i, cpu_possible_mask) {
		per_cpu(cpu_cur_cluster, i) = cluster;
		cpumask_set_cpu(i, &cluster_cpus[cluster]);
	}
}
cluster_type get_cur_cluster(unsigned int cpu)
{
	return per_cpu(cpu_cur_cluster, cpu);
}

cluster_type get_boot_cluster(void)
{
	return boot_cluster;
}

static void set_cur_cluster(unsigned int cpu, cluster_type target_cluster)
{
	per_cpu(cpu_cur_cluster, cpu) = target_cluster;
}

void reset_lpj_for_cluster(cluster_type cluster)
{
	lpj[!cluster] = 0;
}
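/* Count the online CPUs currently assigned to the CA15 (big) cluster */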
static unsigned int get_num_CA15(void)
{
	unsigned int j, num = 0;

	for_each_cpu(j, cpu_possible_mask) {
		if (per_cpu(cpu_cur_cluster, j) == CA15 && cpu_online(j))
			num++;
	}

	return num;
}
static void set_boot_freq(void)
{
	int i;

	for (i = 0; i < CA_END; i++) {
		if (exynos_info[i] == NULL)
			continue;

		exynos_info[i]->boot_freq
			= clk_get_rate(exynos_info[i]->cpu_clk) / 1000;
	}
}

static unsigned int get_boot_freq(unsigned int cluster)
{
	if (exynos_info[cluster] == NULL)
		return 0;

	return exynos_info[cluster]->boot_freq;
}
/* Get table size */
static unsigned int cpufreq_get_table_size(
			struct cpufreq_frequency_table *table,
			unsigned int cluster_id)
{
	int size = 0;

	/* Both clusters use the same table layout */
	while (table[size].frequency != CPUFREQ_TABLE_END)
		size++;

	return size;
}
/*
 * Copy the entries of all per-cluster cpufreq_frequency_table entries
 * into a single frequency table which is published to the cpufreq core.
 */
static int cpufreq_merge_tables(void)
{
	int cluster_id, i;
	unsigned int total_sz = 0, size[CA_END];
	struct cpufreq_frequency_table *freq_table;

	for (cluster_id = 0; cluster_id < CA_END; cluster_id++) {
		size[cluster_id] = cpufreq_get_table_size(
			exynos_info[cluster_id]->freq_table, cluster_id);
		total_sz += size[cluster_id];
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
						(total_sz + 1), GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;

	merge_freq_table = freq_table;

	memcpy(freq_table, exynos_info[CA15]->freq_table,
			size[CA15] * sizeof(struct cpufreq_frequency_table));
	freq_table += size[CA15];
	memcpy(freq_table, exynos_info[CA7]->freq_table,
			size[CA7] * sizeof(struct cpufreq_frequency_table));

	/* The CA7 entries hold actual rates; convert them to virtual ones */
	for (i = size[CA15]; i < total_sz; i++) {
		if (merge_freq_table[i].frequency != CPUFREQ_ENTRY_INVALID)
			merge_freq_table[i].frequency >>= 1;
	}

	merge_freq_table[total_sz].frequency = CPUFREQ_TABLE_END;

	for (i = 0; merge_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		pr_debug("merged_table index: %d freq: %d\n", i,
						merge_freq_table[i].frequency);
	}

	return 0;
}
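/*
 * Check the PMU common-power status bits to see whether the given
 * cluster is powered on.
 */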
static bool is_alive(unsigned int cluster)
{
	unsigned int tmp;

	tmp = __raw_readl(cluster == CA15 ? EXYNOS5410_ARM_COMMON_STATUS :
					EXYNOS5410_KFC_COMMON_STATUS) & 0x3;

	return tmp ? true : false;
}
/*
 * Request a core switch to the other cluster. The outgoing core's state
 * is saved and the corresponding core of the new cluster is woken up;
 * the waking core then restores that state and takes over the task.
 */
#ifdef CONFIG_BL_SWITCHER
static void switch_to_entry(unsigned int cpu,
			    cluster_type target_cluster)
{
	bL_switch_request(cpu, !target_cluster);
	per_cpu(cpu_cur_cluster, cpu) = target_cluster;
}
#endif
int exynos_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, merge_freq_table);
}

unsigned int exynos_getspeed_cluster(cluster_type cluster)
{
	return VIRT_FREQ(clk_get_rate(exynos_info[cluster]->cpu_clk) / 1000, cluster);
}

unsigned int exynos_getspeed(unsigned int cpu)
{
	unsigned int cur = get_cur_cluster(cpu);

	return exynos_getspeed_cluster(cur);
}
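/*
 * Return the highest frequency requested by any online CPU that is
 * currently running on the given cluster.
 */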
static unsigned int get_max_req_freq(unsigned int cluster_id)
{
	unsigned int i, max_freq = 0, tmp = 0, cur;

	for_each_online_cpu(i) {
		cur = get_cur_cluster(i);
		if (cur == cluster_id) {
			tmp = per_cpu(req_freq, i);
			if (tmp > max_freq)
				max_freq = tmp;
		}
	}

	return max_freq;
}

static void set_req_freq(unsigned int cpu, unsigned int freq)
{
	per_cpu(req_freq, cpu) = freq;
}
static unsigned int exynos_get_safe_volt(unsigned int old_index,
					unsigned int new_index,
					unsigned int cur)
{
	unsigned int safe_arm_volt = 0;
	struct cpufreq_frequency_table *freq_table
					= exynos_info[cur]->freq_table;
	unsigned int *volt_table = exynos_info[cur]->volt_table;

	/*
	 * The ARM clock source is temporarily switched from APLL to MPLL.
	 * To support that level, the regulator must supply the voltage
	 * required for MPLL operation.
	 */
	if (exynos_info[cur]->need_apll_change != NULL) {
		if (exynos_info[cur]->need_apll_change(old_index, new_index) &&
			(freq_table[new_index].frequency
					< exynos_info[cur]->mpll_freq_khz) &&
			(freq_table[old_index].frequency
					< exynos_info[cur]->mpll_freq_khz)) {
			safe_arm_volt
				= volt_table[exynos_info[cur]->pll_safe_idx];
		}
	}

	return safe_arm_volt;
}
/* Determine a valid target frequency using freq_table */
int exynos5_frequency_table_target(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table,
				   unsigned int target_freq,
				   unsigned int relation,
				   unsigned int *index)
{
	unsigned int i;

	if (!cpu_online(policy->cpu))
		return -EINVAL;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (target_freq == freq) {
			*index = i;
			break;
		}
	}

	if (table[i].frequency == CPUFREQ_TABLE_END)
		return -EINVAL;

	return 0;
}
static int is_cpufreq_valid(int cpu)
{
	struct cpufreq_policy policy;

	return !cpufreq_get_policy(&policy, cpu);
}
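/*
 * Scale the given cluster from curr_freq to target_freq, sequencing the
 * regulator around the clock change: when raising the frequency the
 * voltage is increased first; when lowering it, or after an APLL->MPLL
 * safe-voltage transition, the voltage is dropped only after the clock
 * has settled.
 */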
static int exynos_cpufreq_scale(unsigned int target_freq,
				unsigned int curr_freq, unsigned int cpu)
{
	unsigned int cur = get_cur_cluster(cpu);
	struct cpufreq_frequency_table *freq_table
					= exynos_info[cur]->freq_table;
	unsigned int *volt_table = exynos_info[cur]->volt_table;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct regulator *regulator = exynos_info[cur]->regulator;
	unsigned int new_index, old_index, j;
	unsigned int volt, safe_volt = 0;
	int ret = 0;

	if (!policy)
		return ret;

	if (!is_alive(cur))
		goto out;

	freqs[cur]->cpu = cpu;
	freqs[cur]->new = target_freq;

	if (exynos5_frequency_table_target(policy, freq_table,
				ACTUAL_FREQ(curr_freq, cur),
				CPUFREQ_RELATION_L, &old_index)) {
		ret = -EINVAL;
		goto out;
	}

	if (exynos5_frequency_table_target(policy, freq_table,
				ACTUAL_FREQ(freqs[cur]->new, cur),
				CPUFREQ_RELATION_L, &new_index)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The ARM clock source is temporarily switched from APLL to MPLL.
	 * To support that level, the regulator must supply the voltage
	 * required for MPLL operation.
	 */
	safe_volt = exynos_get_safe_volt(old_index, new_index, cur);
	if (safe_volt)
		safe_volt = get_limit_voltage(safe_volt);

	volt = get_limit_voltage(volt_table[new_index]);

	/* Update policy current frequency */
	for_each_cpu(j, &cluster_cpus[cur]) {
		if (is_cpufreq_valid(j)) {
			freqs[cur]->cpu = j;
			cpufreq_notify_transition(
					freqs[cur], CPUFREQ_PRECHANGE);
		}
	}

	/* When the new frequency is higher than the current frequency */
	if ((ACTUAL_FREQ(freqs[cur]->new, cur) >
		ACTUAL_FREQ(freqs[cur]->old, cur)) && !safe_volt)
		/* Raise the voltage first, then the frequency */
		regulator_set_voltage(regulator, volt, volt);

	if (safe_volt)
		regulator_set_voltage(regulator, safe_volt, safe_volt);

	if (old_index != new_index)
		exynos_info[cur]->set_freq(old_index, new_index);

	if (!global_lpj_ref.freq) {
		global_lpj_ref.ref = loops_per_jiffy;
		global_lpj_ref.freq = freqs[cur]->old;
	}

	lpj[cur] = cpufreq_scale(global_lpj_ref.ref,
			global_lpj_ref.freq, freqs[cur]->new);
	loops_per_jiffy = max(lpj[CA7], lpj[CA15]);

	for_each_cpu(j, &cluster_cpus[cur]) {
		if (is_cpufreq_valid(j)) {
			freqs[cur]->cpu = j;
			cpufreq_notify_transition(
					freqs[cur], CPUFREQ_POSTCHANGE);
		}
	}

	/* When the new frequency is lower than the current frequency */
	if ((ACTUAL_FREQ(freqs[cur]->new, cur) <
		ACTUAL_FREQ(freqs[cur]->old, cur)) ||
		((ACTUAL_FREQ(freqs[cur]->new, cur) >
		ACTUAL_FREQ(freqs[cur]->old, cur)) && safe_volt))
		/* Lower the voltage after the frequency change */
		regulator_set_voltage(regulator, volt, volt);

out:
	cpufreq_cpu_put(policy);

	return ret;
}
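/*
 * Switch every CPU in the policy to the opposite cluster and update the
 * per-cluster CPU masks accordingly. Returns the new cluster.
 */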
static cluster_type exynos_switch(struct cpufreq_policy *policy, cluster_type cur)
{
	unsigned int cpu;
	cluster_type new_cluster;

	new_cluster = !cur;

	for_each_cpu(cpu, policy->cpus) {
		switch_to_entry(cpu, new_cluster);
		/* update the big/LITTLE cpu masks */
		cpumask_clear_cpu(cpu, &cluster_cpus[cur]);
		cpumask_set_cpu(cpu, &cluster_cpus[new_cluster]);
	}

	return new_cluster;
}
/*
 * Set the clock frequency: the cpufreq .target hook. It decides whether
 * the request can be served on the current cluster or needs an IKS
 * switch, applies the user-set big-core budget and the PM QoS floor,
 * and hands the result to exynos_cpufreq_scale().
 */
static int exynos_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	/* read the current cluster */
	cluster_type cur = get_cur_cluster(policy->cpu);
	unsigned int index, new_freq = 0, do_switch = 0;
	int count, ret = 0;
	bool user = false, later = false, limit_eagle = false;
	int cpu, delta;
	unsigned int min_cpu = 0, min_freq = UINT_MAX;

	mutex_lock(&cpufreq_lock);

	all_cpu_freqs[policy->cpu] = target_freq;

	/* delta is the number of cores that have to switch */
	delta = get_num_CA15() - user_set_eagle_count;
	if (delta > 0) {
		/* find the minimum-frequency core among the CA15s */
		for_each_online_cpu(cpu) {
			if (all_cpu_freqs[cpu] > freq_max[CA7] &&
				all_cpu_freqs[cpu] < min_freq) {
				min_cpu = cpu;
				min_freq = all_cpu_freqs[cpu];
			}
		}

		/* if the current core's freq is the minimum, switch to CA7 */
		if (min_cpu == policy->cpu) {
			target_freq = freq_max[CA7];
			limit_eagle = true;
			all_cpu_freqs[policy->cpu] = target_freq;
		}
	}

	/* cap target_freq if the core cannot switch due to the user-set limit */
	if (get_num_CA15() < CPU_MAX_COUNT) {
		if (cur == CA7 && user_set_eagle_count <= get_num_CA15()
			&& target_freq > freq_max[CA7]) {
			target_freq = freq_max[CA7];
			limit_eagle = true;
			all_cpu_freqs[policy->cpu] = target_freq;
		}
	}

	if (exynos_info[cur]->blocked)
		goto out;

	count = get_num_CA15();

	/* get the current frequency */
	freqs[cur]->old = exynos_getspeed(policy->cpu);

	/* save the requested frequency for this cpu */
	set_req_freq(policy->cpu, target_freq);

	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_CLOCK_SWITCH_CHANGE,
		"IN : cpu=%d cluster=%c pre=%d, new=%d", policy->cpu,
		(cur == CA7 ? 'L' : 'B'), freqs[cur]->old, target_freq);

#if defined(CONFIG_CPU_FREQ_GOV_USERSPACE) || defined(CONFIG_CPU_FREQ_GOV_PERFORMANCE)
	if ((strcmp(policy->governor->name, "userspace") == 0)
			|| strcmp(policy->governor->name, "performance") == 0) {
		user = true;
		goto done;
	}
#endif

	if (freqs[cur]->old <= UP_STEP_OLD && target_freq > UP_STEP_NEW)
		target_freq = STEP_LEVEL_CA7_MAX;

	if (freqs[cur]->old >= DOWN_STEP_OLD && target_freq < DOWN_STEP_NEW) {
		if (strcmp(policy->governor->name, "ondemand") == 0)
			target_freq = STEP_LEVEL_CA15_MIN;
		else
			target_freq = STEP_LEVEL_CA7_MAX;
	}

	if (!limit_eagle)
		target_freq = max((unsigned int)pm_qos_request(PM_QOS_CPU_FREQ_MIN), target_freq);

done:
	if (cur == CA15 && target_freq < freq_min[CA15]) {
		do_switch = 1;	/* Switch from big to LITTLE */
	} else if (cur == CA7 && user_set_eagle_count > get_num_CA15()
			&& target_freq > freq_max[CA7]) {
		do_switch = 1;	/* Switch from LITTLE to big */
		if (count > 0 && count < 4 &&
			target_freq > exynos_info[cur]->max_op_freqs[count + 1])
			later = true;
	}

#ifdef CONFIG_BL_SWITCHER
	if (do_switch) {
		if (later) {
			cur = !cur;
			count++;
			set_cur_cluster(policy->cpu, cur);
		} else {
			cur = exynos_switch(policy, cur);
		}
		freqs[cur]->old = exynos_getspeed_cluster(cur);
		policy->cur = freqs[cur]->old;
	}
#endif

	if (user)
		new_freq = target_freq;
	else
		new_freq = max(get_max_req_freq(cur), target_freq);
	new_freq = min(new_freq, exynos_info[cur]->max_op_freqs[count]);

	if (cpufreq_frequency_table_target(&fake_policy[cur][policy->cpu],
			exynos_info[cur]->freq_table,
			ACTUAL_FREQ(new_freq, cur), relation, &index)) {
		ret = -EINVAL;
		goto out;
	}

	new_freq = exynos_info[cur]->freq_table[index].frequency;

	/* frequency and voltage scaling */
	ret = exynos_cpufreq_scale(VIRT_FREQ(new_freq, cur),
					freqs[cur]->old, policy->cpu);

#ifdef CONFIG_BL_SWITCHER
	if (do_switch && later)
		exynos_switch(policy, !cur);
#endif

out:
	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_CLOCK_SWITCH_CHANGE,
		"OUT : cpu=%d cluster=%c pre=%d, new=%d other=%x",
		policy->cpu, (cur == CA7 ? 'L' : 'B'), freqs[cur]->old,
		VIRT_FREQ(new_freq, cur),
		do_switch << 2 | later << 1 | (ret == 0 ? 0 : 1));

	mutex_unlock(&cpufreq_lock);

	return ret;
}
#ifdef CONFIG_PM
static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
{
	exynos5410_bb_con0 = __raw_readl(EXYNOS5410_BB_CON0);

	return 0;
}

static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
{
	freqs[CA7]->old = VIRT_FREQ(get_boot_freq(CA7), CA7);
	freqs[CA15]->old = VIRT_FREQ(get_boot_freq(CA15), CA15);

	__raw_writel(exynos5410_bb_con0, EXYNOS5410_BB_CON0);

	return 0;
}
#endif
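/*
 * Prepare a cluster for low-power entry: on the way down, remember the
 * current voltage and drop to the voltage matching the cluster's minimum
 * frequency; on the way back up, restore the saved voltage.
 */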
void exynos_lowpower_for_cluster(cluster_type cluster, bool on)
{
	int volt;

	mutex_lock(&cpufreq_lock);
	if (cluster == CA15) {
		if (on) {
			volt_powerdown[CA15] = regulator_get_voltage(arm_regulator);
			volt = get_match_volt(ID_ARM, ACTUAL_FREQ(freq_min[CA15], CA15));
			volt = get_limit_voltage(volt);
			regulator_set_voltage(arm_regulator, volt, volt);
		} else {
			volt = volt_powerdown[CA15];
			volt = get_limit_voltage(volt);
			regulator_set_voltage(arm_regulator, volt, volt);
		}
	} else {
		if (on) {
			volt_powerdown[CA7] = regulator_get_voltage(kfc_regulator);
			volt = get_match_volt(ID_KFC, ACTUAL_FREQ(freq_min[CA7], CA7));
			volt = get_limit_voltage(volt);
			regulator_set_voltage(kfc_regulator, volt, volt);
		} else {
			volt = volt_powerdown[CA7];
			volt = get_limit_voltage(volt);
			regulator_set_voltage(kfc_regulator, volt, volt);
		}
	}
	mutex_unlock(&cpufreq_lock);
}
/*
 * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in the
 * suspend-resume context
 * @notifier
 * @pm_event
 * @v
 *
 * While the per-cluster ->blocked flag is set, target() rejects every
 * scaling request. The boot_freq value is the initial frequency set by
 * the bootloader. To eliminate possible inconsistency in clock values,
 * we save and restore frequencies during suspend and resume and block
 * CPUFREQ activities. Note that the standard suspend/resume callbacks
 * cannot be used, as they run too deep (syscore_ops) for regulator
 * actions.
 */
static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
				       unsigned long pm_event, void *v)
{
	unsigned int freqCA7, freqCA15;
	unsigned int bootfreqCA7, bootfreqCA15;
	int volt;

	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		mutex_lock(&cpufreq_lock);
		exynos_info[CA7]->blocked = true;
		exynos_info[CA15]->blocked = true;
		mutex_unlock(&cpufreq_lock);

		bootfreqCA7 = VIRT_FREQ(get_boot_freq(CA7), CA7);
		bootfreqCA15 = VIRT_FREQ(get_boot_freq(CA15), CA15);

		freqCA7 = exynos_getspeed_cluster(CA7);
		freqCA15 = exynos_getspeed_cluster(CA15);

		volt = max(get_match_volt(ID_KFC, ACTUAL_FREQ(bootfreqCA7, CA7)),
				get_match_volt(ID_KFC, ACTUAL_FREQ(freqCA7, CA7)));
		volt = get_limit_voltage(volt);

		if (regulator_set_voltage(kfc_regulator, volt, volt))
			goto err;

		volt = max(get_match_volt(ID_ARM, ACTUAL_FREQ(bootfreqCA15, CA15)),
				get_match_volt(ID_ARM, ACTUAL_FREQ(freqCA15, CA15)));
		volt = get_limit_voltage(volt);

		if (regulator_set_voltage(arm_regulator, volt, volt))
			goto err;

		pr_debug("PM_SUSPEND_PREPARE for CPUFREQ\n");
		break;
	case PM_POST_SUSPEND:
		pr_debug("PM_POST_SUSPEND for CPUFREQ\n");
		mutex_lock(&cpufreq_lock);
		exynos_info[CA7]->blocked = false;
		exynos_info[CA15]->blocked = false;
		mutex_unlock(&cpufreq_lock);
		break;
	}

	return NOTIFY_OK;
err:
	pr_err("%s: failed to set voltage\n", __func__);

	return NOTIFY_BAD;
}
static struct notifier_block exynos_cpufreq_nb = {
	.notifier_call = exynos_cpufreq_pm_notifier,
};
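/*
 * On a TMU_COLD event, apply (or remove) a 75 mV offset on both
 * regulators to guarantee stable operation at low temperature.
 */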
static int exynos_cpufreq_tmu_notifier(struct notifier_block *notifier,
				       unsigned long event, void *v)
{
	int volt;
	int *on = v;

	if (event != TMU_COLD)
		return NOTIFY_OK;

	mutex_lock(&cpufreq_lock);
	if (*on)
		volt_offset = 75000;
	else
		volt_offset = 0;

	volt = get_limit_voltage(regulator_get_voltage(arm_regulator));
	regulator_set_voltage(arm_regulator, volt, volt);

	volt = get_limit_voltage(regulator_get_voltage(kfc_regulator));
	regulator_set_voltage(kfc_regulator, volt, volt);
	mutex_unlock(&cpufreq_lock);

	return NOTIFY_OK;
}

static struct notifier_block exynos_tmu_nb = {
	.notifier_call = exynos_cpufreq_tmu_notifier,
};
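/*
 * Keep the per-cluster fake policies in sync with the published policy:
 * split policy->min/max at the CA7 ceiling and convert the CA7 limits
 * to actual (doubled) rates.
 */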
static int exynos_policy_notifier(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;

	if (val != CPUFREQ_ADJUST)
		return 0;

	if (!cpu_online(cpu))
		return -EINVAL;

	if (policy->max <= freq_max[CA7]) {
		fake_policy[CA7][cpu].max = ACTUAL_FREQ(policy->max, CA7);
		fake_policy[CA15][cpu].max = freq_max[CA15];
	} else {
		fake_policy[CA7][cpu].max = ACTUAL_FREQ(freq_max[CA7], CA7);
		fake_policy[CA15][cpu].max = policy->max;
	}

	if (policy->min <= freq_max[CA7]) {
		fake_policy[CA7][cpu].min = ACTUAL_FREQ(policy->min, CA7);
		fake_policy[CA15][cpu].min = freq_min[CA15];
	} else {
		fake_policy[CA7][cpu].min = ACTUAL_FREQ(freq_min[CA7], CA7);
		fake_policy[CA15][cpu].min = policy->min;
	}

	return 0;
}

static struct notifier_block notifier_policy_block = {
	.notifier_call = exynos_policy_notifier,
};
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	pr_debug("%s: cpu[%d]\n", __func__, policy->cpu);

	policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);

	freqs[CA7]->old = exynos_getspeed_cluster(CA7);
	freqs[CA15]->old = exynos_getspeed_cluster(CA15);

	boot_freq = exynos_getspeed(policy->cpu);

	cpufreq_frequency_table_get_attr(merge_freq_table, policy->cpu);

	/* set the transition latency value */
	policy->cpuinfo.transition_latency = 100000;

	cpumask_clear(policy->cpus);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return cpufreq_frequency_table_cpuinfo(policy, merge_freq_table);
}

static struct cpufreq_driver exynos_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= exynos_verify_speed,
	.target		= exynos_target,
	.get		= exynos_getspeed,
	.init		= exynos_cpufreq_cpu_init,
	.name		= "exynos_cpufreq",
#ifdef CONFIG_PM
	.suspend	= exynos_cpufreq_suspend,
	.resume		= exynos_cpufreq_resume,
#endif
};
/************************** sysfs interface ************************/
static ssize_t show_min_freq(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", (unsigned int)pm_qos_request(PM_QOS_CPU_FREQ_MIN));
}

static ssize_t show_max_freq(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", user_set_max_freq);
}

static ssize_t show_max_eagle_count(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", user_set_eagle_count);
}
static ssize_t store_min_freq(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	int cpu;
	unsigned int input;
	struct cpufreq_policy *policy;

	if (!sscanf(buf, "%u", &input))
		return -EINVAL;

	if (input > freq_max[CA15])
		input = freq_max[CA15];

	if (input <= freq_min[CA7]) {
		if (pm_qos_request_active(&min_cpu_qos))
			pm_qos_remove_request(&min_cpu_qos);
	} else {
		if (pm_qos_request_active(&min_cpu_qos))
			pm_qos_update_request(&min_cpu_qos, input);
		else
			pm_qos_add_request(&min_cpu_qos, PM_QOS_CPU_FREQ_MIN, input);
	}

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		exynos_target(policy, input, CPUFREQ_RELATION_L);
		cpufreq_cpu_put(policy);
	}

	return count;
}

static ssize_t store_max_freq(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	int cpu;
	unsigned int input;
	struct cpufreq_policy *policy;

	if (!sscanf(buf, "%u", &input))
		return -EINVAL;

	if (input > freq_max[CA15] || input == 0)
		input = freq_max[CA15];

	user_set_max_freq = input;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		policy->user_policy.max = input;
		cpufreq_update_policy(cpu);
		cpufreq_cpu_put(policy);
	}

	return count;
}

static ssize_t store_max_eagle_count(struct kobject *kobj, struct attribute *attr,
				     const char *buf, size_t count)
{
	unsigned int input;

	if (!sscanf(buf, "%u", &input))
		return -EINVAL;

	/* input is unsigned, so only the upper bound needs clamping */
	if (input > CPU_MAX_COUNT)
		input = CPU_MAX_COUNT;

	user_set_eagle_count = input;

	return count;
}
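/*
 * The attributes below are published in a group named "iks-cpufreq"
 * under the cpufreq global kobject. Assuming the usual sysfs mount
 * point, usage looks like:
 *
 *   echo 800000 > /sys/devices/system/cpu/cpufreq/iks-cpufreq/min_freq
 *   echo 2      > /sys/devices/system/cpu/cpufreq/iks-cpufreq/max_eagle_count
 *   cat /sys/devices/system/cpu/cpufreq/iks-cpufreq/max_freq
 */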
define_one_global_rw(min_freq);
define_one_global_rw(max_freq);
define_one_global_rw(max_eagle_count);

static struct attribute *iks_attributes[] = {
	&min_freq.attr,
	&max_freq.attr,
	&max_eagle_count.attr,
	NULL
};

static struct attribute_group iks_attr_group = {
	.attrs = iks_attributes,
	.name = "iks-cpufreq",
};
/************************** sysfs end ************************/
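/*
 * Before reboot, block further scaling and raise both regulators to a
 * voltage safe for either the current or the boot frequency, whichever
 * needs more.
 */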
static int exynos_cpufreq_reboot_notifier_call(struct notifier_block *this,
					       unsigned long code, void *_cmd)
{
	unsigned int freqCA7, freqCA15;
	unsigned int bootfreqCA7, bootfreqCA15;
	int volt;

	mutex_lock(&cpufreq_lock);
	exynos_info[CA7]->blocked = true;
	exynos_info[CA15]->blocked = true;
	mutex_unlock(&cpufreq_lock);

	bootfreqCA7 = VIRT_FREQ(get_boot_freq(CA7), CA7);
	bootfreqCA15 = VIRT_FREQ(get_boot_freq(CA15), CA15);

	freqCA7 = exynos_getspeed_cluster(CA7);
	freqCA15 = exynos_getspeed_cluster(CA15);

	volt = max(get_match_volt(ID_KFC, ACTUAL_FREQ(bootfreqCA7, CA7)),
			get_match_volt(ID_KFC, ACTUAL_FREQ(freqCA7, CA7)));
	volt = get_limit_voltage(volt);

	if (regulator_set_voltage(kfc_regulator, volt, volt))
		goto err;

	volt = max(get_match_volt(ID_ARM, ACTUAL_FREQ(bootfreqCA15, CA15)),
			get_match_volt(ID_ARM, ACTUAL_FREQ(freqCA15, CA15)));
	volt = get_limit_voltage(volt);

	if (regulator_set_voltage(arm_regulator, volt, volt))
		goto err;

	return NOTIFY_DONE;
err:
	pr_err("%s: failed to set voltage\n", __func__);

	return NOTIFY_BAD;
}

static struct notifier_block exynos_cpufreq_reboot_notifier = {
	.notifier_call = exynos_cpufreq_reboot_notifier_call,
};
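/*
 * PM QoS minimum-frequency handler: pick the cluster that must satisfy
 * the new floor and push its first online CPU up to that frequency.
 */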
static int exynos_qos_handler(struct notifier_block *b, unsigned long val, void *v)
{
	int ret;
	unsigned int freq;
	cluster_type cluster;
	struct cpufreq_policy *policy;
	int cpu;

	if (val > freq_max[CA7]) {
		freq = exynos_getspeed_cluster(CA15);
		cluster = CA15;
	} else {
		freq = exynos_getspeed_cluster(CA7);
		cluster = CA7;
	}

	if (freq >= val || (cluster == CA7 && cpumask_empty(&cluster_cpus[CA7])))
		return NOTIFY_OK;

	if (cluster == CA15 && cpumask_empty(&cluster_cpus[CA15]))
		cpu = 0;
	else
		cpu = cpumask_first(&cluster_cpus[cluster]);

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return NOTIFY_BAD;

#if defined(CONFIG_CPU_FREQ_GOV_USERSPACE) || defined(CONFIG_CPU_FREQ_GOV_PERFORMANCE)
	if ((strcmp(policy->governor->name, "userspace") == 0)
			|| strcmp(policy->governor->name, "performance") == 0) {
		/* drop the reference taken above before bailing out */
		cpufreq_cpu_put(policy);
		return NOTIFY_OK;
	}
#endif

	ret = cpufreq_driver_target(policy, val, CPUFREQ_RELATION_H);

	cpufreq_cpu_put(policy);

	if (ret < 0)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block exynos_qos_notifier = {
	.notifier_call = exynos_qos_handler,
};
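/*
 * Hotplug notifier: when a big core comes up, lower policy.max on all
 * cores if the per-count operating limit requires it, and restore the
 * original maximum once every core is back online.
 */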
static int __cpuinit exynos_hotplug_cpu_handler(struct notifier_block *nfb,
						unsigned long action, void *hcpu)
{
	unsigned int cpu;
	unsigned int wakeup_cpu = (unsigned long)hcpu, max_freq;
	struct cpufreq_policy *policy;
	bool change_max = false;

	switch (action) {
	case CPU_ONLINE:
		if (!policy_max_freq)
			return NOTIFY_OK;

		if (num_online_cpus() < NR_CPUS) {
			/*
			 * If policy.max of the other cores was changed,
			 * set the waking core's policy.max to the same value.
			 */
			policy = cpufreq_cpu_get(wakeup_cpu);
			if (!policy)
				return NOTIFY_BAD;

			if (policy->max != policy_max_freq) {
				policy->user_policy.max = policy_max_freq;
				cpufreq_update_policy(wakeup_cpu);
			}
			cpufreq_cpu_put(policy);
		} else {
			/* All cores are awake; restore the original policy.max */
			for_each_online_cpu(cpu) {
				policy = cpufreq_cpu_get(cpu);
				if (!policy)
					return NOTIFY_BAD;

				if (policy->max != policy_max_freq) {
					policy->user_policy.max = policy_max_freq;
					pr_info("IKS-CPUFREQ: Restore cpu%d max_freq [%d] -> [%d]\n",
							cpu, policy->max, policy_max_freq);
					cpufreq_update_policy(cpu);
				}
				cpufreq_cpu_put(policy);
			}
			policy_max_freq = 0;
		}
		break;
	case CPU_UP_PREPARE:
		if (per_cpu(cpu_cur_cluster, wakeup_cpu) == CA15) {
			policy = cpufreq_cpu_get(smp_processor_id());
			if (!policy)
				return NOTIFY_BAD;

			max_freq = exynos_info[CA15]->max_op_freqs[get_num_CA15() + 1];
			if (policy->max > max_freq) {
				change_max = true;
				if (!policy_max_freq)
					policy_max_freq = policy->max;
			}
			cpufreq_cpu_put(policy);

			if (change_max) {
				for_each_online_cpu(cpu) {
					policy = cpufreq_cpu_get(cpu);
					if (!policy)
						return NOTIFY_BAD;

					policy->user_policy.max = max_freq;
					pr_info("IKS-CPUFREQ: Change cpu%d max_freq [%d] -> [%d]\n",
							cpu, policy->max, max_freq);
					cpufreq_update_policy(cpu);
					cpufreq_cpu_put(policy);
				}
			}
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata exynos_hotplug_cpu_notifier = {
	.notifier_call = exynos_hotplug_cpu_handler,
};
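/*
 * Driver initialisation: allocate per-cluster state, detect the boot
 * cluster from MPIDR, take the regulators, build the merged frequency
 * table and register all notifiers before the cpufreq driver itself.
 */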
static int __init exynos_cpufreq_init(void)
{
	int ret = -EINVAL;
	int cpu;

	boot_cluster = 0;

	exynos_info[CA7] = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
	if (!exynos_info[CA7]) {
		ret = -ENOMEM;
		goto err_alloc_info_CA7;
	}

	exynos_info[CA15] = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
	if (!exynos_info[CA15]) {
		ret = -ENOMEM;
		goto err_alloc_info_CA15;
	}

	freqs[CA7] = kzalloc(sizeof(struct cpufreq_freqs), GFP_KERNEL);
	if (!freqs[CA7]) {
		ret = -ENOMEM;
		goto err_alloc_freqs_CA7;
	}

	freqs[CA15] = kzalloc(sizeof(struct cpufreq_freqs), GFP_KERNEL);
	if (!freqs[CA15]) {
		ret = -ENOMEM;
		goto err_alloc_freqs_CA15;
	}

	/* Get the boot cluster number - 0 for CA7, 1 for CA15 */
	boot_cluster = !(read_cpuid(CPUID_MPIDR) >> 8 & 0xf);
	pr_debug("%s: boot_cluster is %s\n", __func__,
			boot_cluster == CA7 ? "CA7" : "CA15");

	init_cpumask_cluster_set(boot_cluster);

	ret = exynos5410_cpufreq_CA7_init(&exynos_info_CA7);
	if (ret)
		goto err_init_cpufreq;

	ret = exynos5410_cpufreq_CA15_init(&exynos_info_CA15);
	if (ret)
		goto err_init_cpufreq;

	arm_regulator = regulator_get(NULL, "vdd_arm");
	if (IS_ERR(arm_regulator)) {
		pr_err("%s: failed to get resource vdd_arm\n", __func__);
		goto err_vdd_arm;
	}

	kfc_regulator = regulator_get(NULL, "vdd_kfc");
	if (IS_ERR(kfc_regulator)) {
		pr_err("%s: failed to get resource vdd_kfc\n", __func__);
		goto err_vdd_kfc;
	}

	memcpy(exynos_info[CA7], &exynos_info_CA7,
			sizeof(struct exynos_dvfs_info));
	exynos_info[CA7]->regulator = kfc_regulator;

	memcpy(exynos_info[CA15], &exynos_info_CA15,
			sizeof(struct exynos_dvfs_info));
	exynos_info[CA15]->regulator = arm_regulator;

	if (exynos_info[CA7]->set_freq == NULL) {
		pr_err("%s: No set_freq function (ERR)\n", __func__);
		goto err_set_freq;
	}

	freq_max[CA15] = exynos_info[CA15]->
		freq_table[exynos_info[CA15]->max_support_idx].frequency;
	freq_min[CA15] = exynos_info[CA15]->
		freq_table[exynos_info[CA15]->min_support_idx].frequency;
	freq_max[CA7] = VIRT_FREQ(exynos_info[CA7]->
		freq_table[exynos_info[CA7]->max_support_idx].frequency, CA7);
	freq_min[CA7] = VIRT_FREQ(exynos_info[CA7]->
		freq_table[exynos_info[CA7]->min_support_idx].frequency, CA7);

	ret = cpufreq_merge_tables();
	if (ret)
		goto err_set_freq;

	set_boot_freq();

	register_pm_notifier(&exynos_cpufreq_nb);
	register_reboot_notifier(&exynos_cpufreq_reboot_notifier);
	exynos_tmu_add_notifier(&exynos_tmu_nb);
	pm_qos_add_notifier(PM_QOS_CPU_FREQ_MIN, &exynos_qos_notifier);

	for_each_cpu(cpu, cpu_possible_mask) {
		fake_policy[CA15][cpu].cpu = cpu;
		fake_policy[CA15][cpu].max = freq_max[CA15];
		fake_policy[CA15][cpu].min = freq_min[CA15];
		fake_policy[CA7][cpu].max = ACTUAL_FREQ(freq_max[CA7], CA7);
		fake_policy[CA7][cpu].min = ACTUAL_FREQ(freq_min[CA7], CA7);
	}

	cpufreq_register_notifier(&notifier_policy_block, CPUFREQ_POLICY_NOTIFIER);

	if (cpufreq_register_driver(&exynos_driver)) {
		pr_err("%s: failed to register cpufreq driver\n", __func__);
		goto err_cpufreq;
	}

	register_cpu_notifier(&exynos_hotplug_cpu_notifier);

	user_set_eagle_count = CPU_MAX_COUNT;
	user_set_max_freq = freq_max[CA15];

	pm_qos_add_request(&min_cpu_qos, PM_QOS_CPU_FREQ_MIN, freq_min[CA7]);

	ret = sysfs_create_group(cpufreq_global_kobject, &iks_attr_group);
	if (ret) {
		pr_err("%s: failed to create iks-cpufreq sysfs interface\n", __func__);
		goto err_cpufreq;
	}

	pm_qos_add_request(&boot_cpu_qos, PM_QOS_CPU_FREQ_MIN, 0);
	pm_qos_update_request_timeout(&boot_cpu_qos, 1200000, 60000 * 1000);

	exynos_cpufreq_init_done = true;

	return 0;

err_cpufreq:
	unregister_pm_notifier(&exynos_cpufreq_nb);
err_set_freq:
	/* fall through: the labels below release both regulators exactly once */
err_vdd_kfc:
	if (!IS_ERR(kfc_regulator))
		regulator_put(kfc_regulator);
err_vdd_arm:
	if (!IS_ERR(arm_regulator))
		regulator_put(arm_regulator);
err_init_cpufreq:
	kfree(freqs[CA15]);
err_alloc_freqs_CA15:
	kfree(freqs[CA7]);
err_alloc_freqs_CA7:
	kfree(exynos_info[CA15]);
err_alloc_info_CA15:
	kfree(exynos_info[CA7]);
err_alloc_info_CA7:
	pr_err("%s: failed initialization\n", __func__);

	return ret;
}

late_initcall(exynos_cpufreq_init);