linux-xdavidwu-xps: 0118-add-scheduler-turbo3-patch.patch (ref 3efac9c8)

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven <arjan@linux.intel.com>
Date: Wed, 21 Nov 2018 21:21:44 +0000
Subject: [PATCH] add scheduler turbo3 patch

Small scheduler tweak to make the scheduler more aware of Intel Turbo
Boost Max Technology 3.0 favored cores: sched_set_itmt_core_prio() now
records the two highest-priority ("best" and "second best") cores, and
select_task_rq_fair() redirects a wakeup from a busy previous CPU onto
one of those cores when it is idle, has fewer than two tasks waiting on
I/O, and sits on the same NUMA node.
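
For illustration only (not part of the change), a rough user-space model
of the wakeup-steering heuristic added below.  The nr_running[],
nr_iowait[] and node_of[] arrays and their example values are made-up
stand-ins for the kernel's cpu_rq(cpu)->nr_running, nr_iowait_cpu(cpu)
and cpu_to_node(cpu):

	#include <stdio.h>

	#define NR_CPUS 4
	/* Hypothetical per-CPU state: CPU 0 and 2 busy, CPU 1 and 3 idle. */
	static int nr_running[NR_CPUS] = { 2, 0, 1, 0 };
	static int nr_iowait[NR_CPUS]  = { 0, 0, 0, 0 };
	static int node_of[NR_CPUS]    = { 0, 0, 0, 0 };

	static int best_core = 1;		/* highest ITMT priority */
	static int second_best_core = 3;	/* second highest */

	/* Return the CPU handed to the rest of select_task_rq_fair(). */
	static int steer_wakeup(int prev_cpu)
	{
		/* Only steer when prev_cpu is busy and not itself a favored core. */
		if (prev_cpu == best_core || prev_cpu == second_best_core ||
		    nr_running[prev_cpu] == 0)
			return prev_cpu;

		if (second_best_core != -1 && nr_running[second_best_core] == 0 &&
		    nr_iowait[second_best_core] < 2 &&
		    node_of[prev_cpu] == node_of[second_best_core])
			prev_cpu = second_best_core;

		/* Checked last so the best core wins when both favored cores are idle. */
		if (best_core != -1 && nr_running[best_core] == 0 &&
		    nr_iowait[best_core] < 2 &&
		    node_of[prev_cpu] == node_of[best_core])
			prev_cpu = best_core;

		return prev_cpu;
	}

	int main(void)
	{
		/* A task last ran on busy CPU 0; the idle favored core 1 is chosen. */
		printf("wakeup steered from CPU 0 to CPU %d\n", steer_wakeup(0));
		return 0;
	}
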
---
 arch/x86/kernel/itmt.c | 14 ++++++++++++++
 kernel/sched/fair.c    | 19 +++++++++++++++++++
 2 files changed, 33 insertions(+)

diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index 9ff480e94511..57027bfed25f 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -172,6 +172,11 @@ int arch_asym_cpu_priority(int cpu)
 	return per_cpu(sched_core_priority, cpu);
 }
 
+extern int best_core;
+extern int second_best_core;
+static int best_core_score;
+static int second_best_core_score;
+
 /**
  * sched_set_itmt_core_prio() - Set CPU priority based on ITMT
  * @prio:	Priority of cpu core
@@ -201,5 +206,14 @@ void sched_set_itmt_core_prio(int prio, int core_cpu)
 		smt_prio = prio * smp_num_siblings / (i * i);
 		per_cpu(sched_core_priority, cpu) = smt_prio;
 		i++;
+
+		if (smt_prio > best_core_score) {
+			best_core = cpu;
+			best_core_score = smt_prio;
+		} else
+		if (smt_prio > second_best_core_score) {
+			second_best_core = cpu;
+			second_best_core_score = smt_prio;
+		}
 	}
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f461f059278..632fcb22f4e2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6854,6 +6854,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
  *
  * Returns the target CPU number.
  */
+
+int best_core = -1;
+int second_best_core = -1;
+
 static int
 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 {
@@ -6882,6 +6886,21 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
 	}
 
+	if (prev_cpu != best_core && prev_cpu != second_best_core &&
+		       cpu_rq(prev_cpu)->nr_running != 0) {
+		if (second_best_core != -1 && cpu_rq(second_best_core)->nr_running == 0 &&
+			       nr_iowait_cpu(second_best_core) < 2 && cpu_to_node(prev_cpu) == cpu_to_node(second_best_core))
+			prev_cpu = second_best_core;
+		if (best_core != -1 && cpu_rq(best_core)->nr_running == 0 &&
+			       nr_iowait_cpu(best_core) < 2  && cpu_to_node(prev_cpu) == cpu_to_node(best_core))
+			prev_cpu = best_core;
+	}
+/*
+	if (prev_cpu > 0 && cpu_rq(prev_cpu)->nr_running != 0 && cpu_rq(prev_cpu - 1)->nr_running == 0)
+		prev_cpu = prev_cpu - 1;
+*/
+
+
 	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
 		/*
-- 
https://clearlinux.org