Sie sind auf Seite 1von 54

1 #ifndef _LINUX_SCHED_H

2 #define _LINUX_SCHED_H
3
4 #include <uapi/linux/sched.h>
5
6 #include <linux/sched/prio.h>
7
8
/*
 * Legacy POSIX scheduling parameters (sched_setparam()/sched_getparam()):
 * a single static priority. Kept ABI-stable; see struct sched_attr below
 * for the extended variant.
 */
struct sched_param {
	int sched_priority;
};
12
13 #include <asm/param.h> /* for HZ */
14
15 #include <linux/capability.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/timex.h>
20 #include <linux/jiffies.h>
21 #include <linux/plist.h>
22 #include <linux/rbtree.h>
23 #include <linux/thread_info.h>
24 #include <linux/cpumask.h>
25 #include <linux/errno.h>
26 #include <linux/nodemask.h>
27 #include <linux/mm_types.h>
28 #include <linux/preempt_mask.h>
29
30 #include <asm/page.h>
31 #include <asm/ptrace.h>
32 #include <linux/cputime.h>
33
34 #include <linux/smp.h>
35 #include <linux/sem.h>
36 #include <linux/shm.h>
37 #include <linux/signal.h>
38 #include <linux/compiler.h>
39 #include <linux/completion.h>
40 #include <linux/pid.h>
41 #include <linux/percpu.h>
42 #include <linux/topology.h>
43 #include <linux/proportions.h>
44 #include <linux/seccomp.h>
45 #include <linux/rcupdate.h>
46 #include <linux/rculist.h>
47 #include <linux/rtmutex.h>
48
49 #include <linux/time.h>
50 #include <linux/param.h>
51 #include <linux/resource.h>
52 #include <linux/timer.h>
53 #include <linux/hrtimer.h>
54 #include <linux/task_io_accounting.h>
55 #include <linux/latencytop.h>
56 #include <linux/cred.h>
57 #include <linux/llist.h>
58 #include <linux/uidgid.h>
59 #include <linux/gfp.h>
60 #include <linux/magic.h>

61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120

#include <asm/processor.h>
#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant at describing a so-called
 * sporadic time-constrained task. In such model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there are a multiplicity of scheduling algorithms
 * and policies, that can be used to ensure all the tasks will make their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
/* Opaque types used only by reference in this header. */
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;

/*
 * VMACACHE_SIZE is a power of two (1 << VMACACHE_BITS);
 * VMACACHE_MASK masks an index into that range.
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];
/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shif
#define
#define
#define
#define

FSHIFT
FIXED_1
LOAD_FREQ
EXP_1

#define EXP_5
#define EXP_15

11
(1<<FSHIFT)
(5*HZ+1)
1884

/*
/*
/*
/*

nr of bits of precision */
1.0 as fixed-point */
5 sec intervals */
1/exp(5sec/1min) as fixed-point *

2014
2037

/* 1/exp(5sec/5min) */
/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
load *= exp; \
load += n*(FIXED_1-exp); \
load >>= FSHIFT;
/* Scheduler-wide counters and load-accounting hooks (defined elsewhere). */
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
];
223
224
225
226
227
228
229
230
231
D)
232
233
234
235
236

extern unsigned long get_parent_ip(unsigned long addr);


extern void dump_cpu_task(int cpu);

/* Opaque types for the CONFIG_SCHED_DEBUG /proc interface below. */
struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
* We have two separate sets of flags: task->state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
#define TASK_RUNNING
0
#define TASK_INTERRUPTIBLE
1
#define TASK_UNINTERRUPTIBLE
2
#define __TASK_STOPPED
4
#define __TASK_TRACED
8
/* in tsk->exit_state */
#define EXIT_DEAD
16
#define EXIT_ZOMBIE
32
#define EXIT_TRACE
(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD
64
#define TASK_WAKEKILL
128
#define TASK_WAKING
256
#define TASK_PARKED
512
#define TASK_STATE_MAX
1024
#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE
(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED
(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED
(TASK_WAKEKILL | __TASK_TRACED)
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL
(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL
(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACE
/* get_task_state() */
#define TASK_REPORT

(TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

237
#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
/* A task in uninterruptible sleep counts towards loadavg unless frozen. */
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

246 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
247
248 #define __set_task_state(tsk, state_value)
\
249
do {
\
250
(tsk)->task_state_change = _THIS_IP_;
\
251
(tsk)->state = (state_value);
\
252
} while (0)
253 #define set_task_state(tsk, state_value)
\
254
do {
\
255
(tsk)->task_state_change = _THIS_IP_;
\
256
set_mb((tsk)->state, (state_value));
\
257
} while (0)
258
259 /*
260 * set_current_state() includes a barrier so that the write of current->stat
e
261 * is correctly serialised wrt the caller's subsequent test of whether to
262 * actually sleep:
263 *
264 *
set_current_state(TASK_UNINTERRUPTIBLE);
265 *
if (do_i_need_to_sleep())
266 *
schedule();
267 *
268 * If the caller does not need such serialisation then use __set_current_sta
te()
269 */
270 #define __set_current_state(state_value)
\
271
do {
\
272
current->task_state_change = _THIS_IP_;
\
273
current->state = (state_value);
\
274
} while (0)
275 #define set_current_state(state_value)
\
276
do {
\
277
current->task_state_change = _THIS_IP_;
\
278
set_mb(current->state, (state_value));
\
279
} while (0)
280
281 #else
282
283 #define __set_task_state(tsk, state_value)
\
284
do { (tsk)->state = (state_value); } while (0)
285 #define set_task_state(tsk, state_value)
\
286
set_mb((tsk)->state, (state_value))
287
288 /*
289 * set_current_state() includes a barrier so that the write of current->stat
e
290 * is correctly serialised wrt the caller's subsequent test of whether to
291 * actually sleep:

292 *
293 *
set_current_state(TASK_UNINTERRUPTIBLE);
294 *
if (do_i_need_to_sleep())
295 *
schedule();
296 *
297 * If the caller does not need such serialisation then use __set_current_sta
te()
298 */
299 #define __set_current_state(state_value)
\
300
do { current->state = (state_value); } while (0)
301 #define set_current_state(state_value)
\
302
set_mb(current->state, (state_value))
303
304 #endif
305
306 /* Task command name length */
307 #define TASK_COMM_LEN 16
308
309 #include <linux/spinlock.h>
310
311 /*
312 * This serializes "schedule()" and also protects
313 * the run-queue from deletions/modifications (but
314 * _adding_ to the beginning of the run-queue has
315 * a separate lock).
316 */
317 extern rwlock_t tasklist_lock;
318 extern spinlock_t mmlist_lock;
319
320 struct task_struct;
321
322 #ifdef CONFIG_PROVE_RCU
323 extern int lockdep_tasklist_lock_is_held(void);
324 #endif /* #ifdef CONFIG_PROVE_RCU */
325
326 extern void sched_init(void);
327 extern void sched_init_smp(void);
328 extern asmlinkage void schedule_tail(struct task_struct *prev);
329 extern void init_idle(struct task_struct *idle, int cpu);
330 extern void init_idle_bootup_task(struct task_struct *idle);
331
332 extern int runqueue_is_locked(int cpu);
333
334 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
335 extern void nohz_balance_enter_idle(int cpu);
336 extern void set_cpu_sd_state_idle(void);
337 extern int get_nohz_timer_target(int pinned);
338 #else
339 static inline void nohz_balance_enter_idle(int cpu) { }
340 static inline void set_cpu_sd_state_idle(void) { }
341 static inline int get_nohz_timer_target(int pinned)
342 {
343
return smp_processor_id();
344 }
345 #endif
346
347 /*
348 * Only dump TASK_* tasks. (0 for all tasks)
349 */
350 extern void show_state_filter(unsigned long state_filter);

351
/* Dump the state of every task (filter 0 == all tasks). */
static inline void show_state(void)
{
	show_state_filter(0);
}
356
extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL
 * for current task), SP is the stack pointer of the first frame that
 * should be shown in the back trace (or NULL if the entire call-chain of
 * the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);
372
#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
/* No lockup detector configured: all hooks become empty stubs. */
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif
404
405 /* Attach to any functions which should be ignored in wchan output. */
406 #define __sched
__attribute__((__section__(".sched.text")))
407
408 /* Linker adds these: start and end of __sched functions */

409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468

extern char __sched_text_start[], __sched_text_end[];


/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT
LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
extern long io_schedule_timeout(long timeout);
static inline void io_schedule(void)
{
io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
* This returns the actual value of the suid_dumpable flag. For things
* that are using this for checking for privilege transitions, it must
* test against SUID_DUMP_USER rather than treating it as a boolean
* value.
*/
static inline int __get_dumpable(unsigned long mm_flags)
{
return mm_flags & MMF_DUMPABLE_MASK;
}
static inline int get_dumpable(struct mm_struct *mm)
{

469
return __get_dumpable(mm->flags);
470 }
471
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
503
504 struct sighand_struct {
505
atomic_t
count;
506
struct k_sigaction
action[_NSIG];
507
spinlock_t
siglock;
508
wait_queue_head_t
signalfd_wqh;
509 };
510
511 struct pacct_struct {
512
int
ac_flag;
513
long
ac_exitcode;
514
unsigned long
ac_mem;
515
cputime_t
ac_utime, ac_stime;
516
unsigned long
ac_minflt, ac_majflt;
517 };
518
519 struct cpu_itimer {
520
cputime_t expires;
521
cputime_t incr;
522
u32 error;
523
u32 incr_error;
524 };
525
526 /**
527 * struct cputime - snaphsot of system and user cputime

528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587

* @utime: time spent in user mode


* @stime: time spent in system mode
*
* Gathers a generic snapshot of user and system time.
*/
struct cputime {
cputime_t utime;
cputime_t stime;
};
/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
* Disable preemption until the scheduler is running.
* Reset by start_kernel()->sched_init()->init_idle().
*
* We include PREEMPT_ACTIVE to avoid cond_resched() from working
* before the scheduler is active -- see should_resched().
*/
#define INIT_PREEMPT_COUNT
(PREEMPT_DISABLED + PREEMPT_ACTIVE)
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime:
thread group interval timers.
* @running:
non-zero when there are timers running and

588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
ry
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646

*
@cputime receives updates.
* @lock:
lock for fields in this struct.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
struct task_cputime cputime;
int running;
raw_spinlock_t lock;
};
#include <linux/rwsem.h>
struct autogroup;
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
* the locking of signal_struct.
*/
struct signal_struct {
atomic_t
sigcnt;
atomic_t
live;
int
nr_threads;
struct list_head
thread_head;
wait_queue_head_t

wait_chldexit; /* for wait4() */

/* current thread group signal load-balancing target: */


struct task_struct
*curr_target;
/* shared signal handling: */
struct sigpending
shared_pending;
/* thread group exit support */
int
group_exit_code;
/* overloaded:
* - notify group_exit_task when ->count is equal to notify_count
* - everyone except group_exit_task is stopped during signal delive
* of fatal signals, group_exit_task processes the signal.
*/
int
notify_count;
struct task_struct
*group_exit_task;
/* thread group stop support, overloads group_exit_code too */
int
group_stop_count;
unsigned int
flags; /* see SIGNAL_* flags below */
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
* to this process instead of 'init'. The service manager is
* able to receive SIGCHLD signals and is able to investigate
* the process until it calls wait(). All children of this
* process will inherit a flag if they should look for a
* child_subreaper process at exit.
*/

647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706

unsigned int
unsigned int

is_child_subreaper:1;
has_child_subreaper:1;

/* POSIX.1b Interval Timers */


int
posix_timer_id;
struct list_head
posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
struct pid *leader_pid;
ktime_t it_real_incr;
/*
* ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
* CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
* values are defined to 0 and 1 respectively
*/
struct cpu_itimer it[2];
/*
* Thread group totals for process CPU timers.
* See thread_group_cputimer(), et al, for details.
*/
struct thread_group_cputimer cputimer;
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
struct pid *tty_old_pgrp;
/* boolean value for session group leader */
int leader;
struct tty_struct *tty; /* NULL if no tty */
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup *autogroup;
#endif
/*
* Cumulative resource counters for dead threads in the group,
* and for reaped dead child processes forked by this group.
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
seqlock_t stats_lock;
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct cputime prev_cputime;
#endif
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
unsigned long maxrss, cmaxrss;
struct task_io_accounting ioac;
/*

707
* Cumulative ns of schedule CPU time fo dead threads in the
708
* group, not including a zombie group leader, (This only differs
709
* from jiffies_to_ns(utime + stime) if sched_clock uses something
710
* other than jiffies.)
711
*/
712
unsigned long long sum_sched_runtime;
713
714
/*
715
* We don't bother to synchronize most readers of this at all,
716
* because there is no reader checking a limit that actually needs
717
* to get both rlim_cur and rlim_max atomically, and either one
718
* alone is a single word that can safely be read normally.
719
* getrlimit/setrlimit use task_lock(current->group_leader) to
720
* protect this instead of the siglock, because they really
721
* have no need to disable irqs.
722
*/
723
struct rlimit rlim[RLIM_NLIMITS];
724
725 #ifdef CONFIG_BSD_PROCESS_ACCT
726
struct pacct_struct pacct;
/* per-process accounting informatio
n */
727 #endif
728 #ifdef CONFIG_TASKSTATS
729
struct taskstats *stats;
730 #endif
731 #ifdef CONFIG_AUDIT
732
unsigned audit_tty;
733
unsigned audit_tty_log_passwd;
734
struct tty_audit_buf *tty_audit_buf;
735 #endif
736 #ifdef CONFIG_CGROUPS
737
/*
738
* group_rwsem prevents new tasks from entering the threadgroup and
739
* member tasks from exiting,a more specifically, setting of
740
* PF_EXITING. fork and exit paths are protected with this rwsem
741
* using threadgroup_change_begin/end(). Users which require
742
* threadgroup to remain stable should use threadgroup_[un]lock()
743
* which also takes care of exec path. Currently, cgroup is the
744
* only user.
745
*/
746
struct rw_semaphore group_rwsem;
747 #endif
748
749
oom_flags_t oom_flags;
750
short oom_score_adj;
/* OOM kill score adjustment */
751
short oom_score_adj_min;
/* OOM kill score adjustment min val
ue.
752
* Only settable by CAP_SYS_RESOURCE
. */
753
754
struct mutex cred_guard_mutex; /* guard against foreign influences
on
755
* credential calculations
756
* (notably. ptrace) */
757 };
758
/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
774
775 /* If true, all threads except ->group_exit_task have pending SIGKILL */
776 static inline int signal_group_exit(const struct signal_struct *sig)
777 {
778
return (sig->flags & SIGNAL_GROUP_EXIT) ||
779
(sig->group_exit_task != NULL);
780 }
781
782 /*
783 * Some day this will be a full-fledged user tracking system..
784 */
785 struct user_struct {
786
atomic_t __count;
/* reference count */
787
atomic_t processes;
/* How many processes does this user have? *
/
788
atomic_t sigpending;
/* How many pending signals does this user h
ave? */
789 #ifdef CONFIG_INOTIFY_USER
790
atomic_t inotify_watches; /* How many inotify watches does this user
have? */
791
atomic_t inotify_devs; /* How many inotify devs does this user have
opened? */
792 #endif
793 #ifdef CONFIG_FANOTIFY
794
atomic_t fanotify_listeners;
795 #endif
796 #ifdef CONFIG_EPOLL
797
atomic_long_t epoll_watches; /* The number of file descriptors curre
ntly watched */
798 #endif
799 #ifdef CONFIG_POSIX_MQUEUE
800
/* protected by mq_lock */
801
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue
? */
802 #endif
803
unsigned long locked_shm; /* How many pages of mlocked shm ? */
804
805 #ifdef CONFIG_KEYS
806
struct key *uid_keyring;
/* UID specific keyring */
807
struct key *session_keyring;
/* UID's default session keyring */
808 #endif
809
810
/* Hash table maintenance information */
811
struct hlist_node uidhash_node;
812
kuid_t uid;
813
814 #ifdef CONFIG_PERF_EVENTS

815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
/
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
.
855
856
857
858
859
860
861
862
863
864
865
/
866
867
*/
868
869
870

atomic_long_t locked_vm;
#endif
};
extern int uids_sysfs_init(void);
extern struct user_struct *find_user(kuid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue)
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */
875
/* Whether per-task sched_info accounting is active for this kernel config. */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}
887
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};
894
/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
900
901 /*
902 * sched-domains (multiprocessor balancing) declarations:
903 */
904 #ifdef CONFIG_SMP
905 #define SD_LOAD_BALANCE
0x0001 /* Do load balancing on this domain.
*/
906 #define SD_BALANCE_NEWIDLE
0x0002 /* Balance when about to become idle
*/
907 #define SD_BALANCE_EXEC
0x0004 /* Balance on exec */
908 #define SD_BALANCE_FORK
0x0008 /* Balance on fork, clone */
909 #define SD_BALANCE_WAKE
0x0010 /* Balance on wakeup */
910 #define SD_WAKE_AFFINE
0x0020 /* Wake task to waking CPU */
911 #define SD_SHARE_CPUCAPACITY
0x0080 /* Domain members share cpu power */
912 #define SD_SHARE_POWERDOMAIN
0x0100 /* Domain members share power domain
*/
913 #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg reso
urces */
914 #define SD_SERIALIZE
0x0400 /* Only a single load balancing inst
ance */
915 #define SD_ASYM_PACKING
0x0800 /* Place busy groups earlier in the
domain */
916 #define SD_PREFER_SIBLING
0x1000 /* Prefer to place tasks in a siblin
g domain */
917 #define SD_OVERLAP
0x2000 /* sched_domains of this level overl
ap */
918 #define SD_NUMA
0x4000 /* cross-node balancing */
919
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif
940
struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;
950
struct sched_group;

953 struct sched_domain {
954
/* These fields must be setup */
955
struct sched_domain *parent;
/* top domain must be null terminate
d */
956
struct sched_domain *child;
/* bottom domain must be null termin
ated */
957
struct sched_group *groups;
/* the balancing groups of the domai
n */
958
unsigned long min_interval;
/* Minimum balance interval ms */
959
unsigned long max_interval;
/* Maximum balance interval ms */
960
unsigned int busy_factor;
/* less balancing by factor if busy
*/
961
unsigned int imbalance_pct;
/* No balance until over watermark *
/
962
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries
*/
963
unsigned int busy_idx;
964
unsigned int idle_idx;
965
unsigned int newidle_idx;
966
unsigned int wake_idx;
967
unsigned int forkexec_idx;
968
unsigned int smt_gain;
969
970
int nohz_idle;
/* NOHZ IDLE status */
971
int flags;
/* See SD_* */
972
int level;
973
974
/* Runtime fields. */
975
unsigned long last_balance;
/* init to jiffies. units in jiffies
*/

976
unsigned int balance_interval; /* initialise to 1. units in ms. */
977
unsigned int nr_balance_failed; /* initialise to 0 */
978
979
/* idle_balance() stats */
980
u64 max_newidle_lb_cost;
981
unsigned long next_decay_max_lb_cost;
982
983 #ifdef CONFIG_SCHEDSTATS
984
/* load_balance() stats */
985
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
986
unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
987
unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
988
unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
989
unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
990
unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
991
unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
992
unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
993
994
/* Active load balancing */
995
unsigned int alb_count;
996
unsigned int alb_failed;
997
unsigned int alb_pushed;
998
999
/* SD_BALANCE_EXEC stats */
1000
unsigned int sbe_count;
1001
unsigned int sbe_balanced;
1002
unsigned int sbe_pushed;
1003
1004
/* SD_BALANCE_FORK stats */
1005
unsigned int sbf_count;
1006
unsigned int sbf_balanced;
1007
unsigned int sbf_pushed;
1008
1009
/* try_to_wake_up() stats */
1010
unsigned int ttwu_wake_remote;
1011
unsigned int ttwu_move_affine;
1012
unsigned int ttwu_move_balance;
1013 #endif
1014 #ifdef CONFIG_SCHED_DEBUG
1015
char *name;
1016 #endif
1017
union {
1018
void *private;
/* used during construction */
1019
struct rcu_head rcu;
/* used during destruction */
1020
};
1021
1022
unsigned int span_weight;
1023
/*
1024
* Span of all CPUs in this domain.
1025
*
1026
* NOTE: this field is variable length. (Allocated dynamically
1027
* by attaching extra space to the end of the structure,
1028
* depending on how many CPUs the kernel has booted up with)
1029
*/
1030
unsigned long span[0];
1031 };
1032
1033 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1034 {
1035
return to_cpumask(sd->span);

1036
1037
1038
,
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094

}
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[]
struct sched_domain_attr *dattr_new);
/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
bool cpus_share_cache(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP
struct sd_data
struct
struct
struct
};

0x01

{
sched_domain **__percpu sd;
sched_group **__percpu sg;
sched_group_capacity **__percpu sgc;

struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
int
flags;
int
numa_level;
struct sd_data
data;
#ifdef CONFIG_SCHED_DEBUG
char
*name;
#endif
};
extern struct sched_domain_topology_level *sched_domain_topology;
extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)
#else
# define SD_INIT_NAME(type)
#endif

.name = #type

#else /* CONFIG_SMP */
struct sched_domain_attr;
static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
}
static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
return true;
}

1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
nd
1120
all
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
/* Arch without stack prefetch: no-op. */
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;
/* A scheduling-entity weight and its cached inverse (for fast division). */
struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};
/* Per-entity decayed load-average tracking. */
struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are bound
	 * above by 1024/(1-y). Thus we only need a u32 to store them for
	 * all choices of y < 1-2^(-32)*1024.
	 */
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};
#ifdef CONFIG_SCHEDSTATS
/* Per-entity scheduler statistics, only compiled in with CONFIG_SCHEDSTATS. */
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif
struct sched_entity {
struct load_weight
struct rb_node
struct list_head
unsigned int

load;
run_node;
group_node;
on_rq;

/* for load-balancing */

u64
u64
u64
u64

exec_start;
sum_exec_runtime;
vruntime;
prev_sum_exec_runtime;

u64

nr_migrations;

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics statistics;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
int
depth;
struct sched_entity
*parent;
/* rq on which this entity is (to be) queued: */
struct cfs_rq
*cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq
*my_q;
#endif
#ifdef CONFIG_SMP
/* Per-entity load-tracking */
struct sched_avg
avg;
#endif
};
/* Real-time (SCHED_FIFO/SCHED_RR) scheduling entity. */
struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272

struct sched_dl_entity {
struct rb_node rb_node;
/*
* Original scheduling parameters. Copied here from sched_attr
* during sched_setattr(), they will remain the same until
* the next sched_setattr().
*/
u64 dl_runtime;
/* maximum runtime for each instance
u64 dl_deadline;
/* relative deadline of each instance
u64 dl_period;
/* separation of two instances (period)
u64 dl_bw;
/* dl_runtime / dl_deadline

*/
*/
*/
*/

/*
* Actual scheduling parameters. Initialized with the values above,
* they are continously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime;
/* remaining runtime for this instance */
u64 deadline;
/* absolute deadline for this instance */
unsigned int flags;
/* specifying the scheduler behaviour */
/*
* Some bool flags:
*
* @dl_throttled tells if we exhausted the runtime. If so, the
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
* @dl_new tells if a new instance arrived. If so we must
* start executing it with full runtime and reset its absolute
* deadline;
*
* @dl_boosted tells if we are boosted due to DI. If so we are
* outside bandwidth enforcement mechanism (but only until we
* exit the critical section);
*
* @dl_yielded tells if task gave up the cpu before consuming
* all its available runtime during the last job.
*/
int dl_throttled, dl_new, dl_boosted, dl_yielded;
/*
* Bandwidth enforcement timer. Each -deadline task has its
* own bandwidth to be enforced, thus we need one timer per task.
*/
struct hrtimer dl_timer;
};
/* Compact per-task RCU state: two flags, readable as one short. */
union rcu_special {
	struct {
		bool blocked;
		bool need_qs;
	} b;
	short s;
};
struct rcu_node;

/* Index of a task's perf event context; -1 marks an invalid context. */
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};
struct task_struct {
volatile long state;
void *stack;
atomic_t usage;
unsigned int flags;
unsigned int ptrace;

/* -1 unrunnable, 0 runnable, >0 stopped */


/* per process flags, defined below */

#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
struct task_struct *last_wakee;
unsigned long wakee_flips;
unsigned long wakee_flip_decay_ts;
int wake_cpu;
#endif
int on_rq;
int prio, static_prio, normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
#endif
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
unsigned int policy;
int nr_cpus_allowed;
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
bool rcu_tasks_holdout;
struct list_head rcu_tasks_holdout_list;
int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

1333
1334 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1335
struct sched_info sched_info;
1336 #endif
1337
1338
struct list_head tasks;
1339 #ifdef CONFIG_SMP
1340
struct plist_node pushable_tasks;
1341
struct rb_node pushable_dl_tasks;
1342 #endif
1343
1344
struct mm_struct *mm, *active_mm;
1345 #ifdef CONFIG_COMPAT_BRK
1346
unsigned brk_randomized:1;
1347 #endif
1348
/* per-thread vma caching */
1349
u32 vmacache_seqnum;
1350
struct vm_area_struct *vmacache[VMACACHE_SIZE];
1351 #if defined(SPLIT_RSS_COUNTING)
1352
struct task_rss_stat
rss_stat;
1353 #endif
1354 /* task state */
1355
int exit_state;
1356
int exit_code, exit_signal;
1357
int pdeath_signal; /* The signal sent when the parent dies */
1358
unsigned int jobctl;
/* JOBCTL_*, siglock protected */
1359
1360
/* Used for emulating ABI behavior of previous Linux versions */
1361
unsigned int personality;
1362
1363
unsigned in_execve:1; /* Tell the LSMs that the process is doing
an
1364
* execve */
1365
unsigned in_iowait:1;
1366
1367
/* Revert to default priority/policy when forking */
1368
unsigned sched_reset_on_fork:1;
1369
unsigned sched_contributes_to_load:1;
1370
1371 #ifdef CONFIG_MEMCG_KMEM
1372
unsigned memcg_kmem_skip_account:1;
1373 #endif
1374
1375
unsigned long atomic_flags; /* Flags needing atomic access. */
1376
1377
struct restart_block restart_block;
1378
1379
pid_t pid;
1380
pid_t tgid;
1381
1382 #ifdef CONFIG_CC_STACKPROTECTOR
1383
/* Canary value for the -fstack-protector gcc feature */
1384
unsigned long stack_canary;
1385 #endif
1386
/*
1387
* pointers to (original) parent process, youngest child, younger s
ibling,
1388
* older sibling, respectively. (p->father can be replaced with
1389
* p->real_parent->pid)
1390
*/

1391
struct task_struct __rcu *real_parent; /* real parent process */
1392
struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4()
reports */
1393
/*
1394
* children/sibling forms the list of my natural children
1395
*/
1396
struct list_head children;
/* list of my children */
1397
struct list_head sibling;
/* linkage in my parent's children
list */
1398
struct task_struct *group_leader;
/* threadgroup leader */
1399
1400
/*
1401
* ptraced is the list of tasks this task is using ptrace on.
1402
* This includes both natural children and PTRACE_ATTACH targets.
1403
* p->ptrace_entry is p's link on the p->parent->ptraced list.
1404
*/
1405
struct list_head ptraced;
1406
struct list_head ptrace_entry;
1407
1408
/* PID/PID hash table linkage. */
1409
struct pid_link pids[PIDTYPE_MAX];
1410
struct list_head thread_group;
1411
struct list_head thread_node;
1412
1413
struct completion *vfork_done;
/* for vfork() */
1414
int __user *set_child_tid;
/* CLONE_CHILD_SETTID */
1415
int __user *clear_child_tid;
/* CLONE_CHILD_CLEARTID */
1416
1417
cputime_t utime, stime, utimescaled, stimescaled;
1418
cputime_t gtime;
1419 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1420
struct cputime prev_cputime;
1421 #endif
1422 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1423
seqlock_t vtime_seqlock;
1424
unsigned long long vtime_snap;
1425
enum {
1426
VTIME_SLEEPING = 0,
1427
VTIME_USER,
1428
VTIME_SYS,
1429
} vtime_snap_whence;
1430 #endif
1431
unsigned long nvcsw, nivcsw; /* context switch counts */
1432
u64 start_time;
/* monotonic time in nsec */
1433
u64 real_start_time;
/* boot based time in nsec */
1434 /* mm fault and swap info: this can arguably be seen as either mm-specific
or thread-specific */
1435
unsigned long min_flt, maj_flt;
1436
1437
struct task_cputime cputime_expires;
1438
struct list_head cpu_timers[3];
1439
1440 /* process credentials */
1441
const struct cred __rcu *real_cred; /* objective and real subjectiv
e task
1442
* credentials (COW) */
1443
const struct cred __rcu *cred; /* effective (overridable) subjecti
ve task
1444
* credentials (COW) */
1445
char comm[TASK_COMM_LEN]; /* executable name excluding path

1446
- access with [gs]et_task_comm (which
lock
1447
it with task_lock())
1448
- initialized normally by setup_new_ex
ec */
1449 /* file system info */
1450
int link_count, total_link_count;
1451 #ifdef CONFIG_SYSVIPC
1452 /* ipc stuff */
1453
struct sysv_sem sysvsem;
1454
struct sysv_shm sysvshm;
1455 #endif
1456 #ifdef CONFIG_DETECT_HUNG_TASK
1457 /* hung task detection */
1458
unsigned long last_switch_count;
1459 #endif
1460 /* CPU-specific state of this task */
1461
struct thread_struct thread;
1462 /* filesystem information */
1463
struct fs_struct *fs;
1464 /* open file information */
1465
struct files_struct *files;
1466 /* namespaces */
1467
struct nsproxy *nsproxy;
1468 /* signal handlers */
1469
struct signal_struct *signal;
1470
struct sighand_struct *sighand;
1471
1472
sigset_t blocked, real_blocked;
1473
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was us
ed */
1474
struct sigpending pending;
1475
1476
unsigned long sas_ss_sp;
1477
size_t sas_ss_size;
1478
int (*notifier)(void *priv);
1479
void *notifier_data;
1480
sigset_t *notifier_mask;
1481
struct callback_head *task_works;
1482
1483
struct audit_context *audit_context;
1484 #ifdef CONFIG_AUDITSYSCALL
1485
kuid_t loginuid;
1486
unsigned int sessionid;
1487 #endif
1488
struct seccomp seccomp;
1489
1490 /* Thread group tracking */
1491
u32 parent_exec_id;
1492
u32 self_exec_id;
1493 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowe
d,
1494 * mempolicy */
1495
spinlock_t alloc_lock;
1496
1497
/* Protection of the PI data structures: */
1498
raw_spinlock_t pi_lock;
1499
1500 #ifdef CONFIG_RT_MUTEXES
1501
/* PI waiters blocked on a rt_mutex held by this task */

1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561

struct rb_root pi_waiters;


struct rb_node *pi_waiters_leftmost;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
int hardirqs_enabled;
int hardirq_context;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
/* journalling filesystem info */
void *journal_info;
/* stacked block device info */
struct bio_list *bio_list;
#ifdef CONFIG_BLOCK
/* stack plugging */
struct blk_plug *plug;
#endif
/* VM state */
struct reclaim_state *reclaim_state;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1;
/* accumulated rss usage */
u64 acct_vm_mem1;
/* accumulated virtual memory usage */
cputime_t acct_timexpd; /* stime + utime since last update */
#endif

1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
er
1612
1613
1614
ode
1615
1616
1617
MA
1618

#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed;
/* Protected by alloc_lock */
seqcount_t mems_allowed_seq;
/* Seqence no to catch updates */
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy;
/* Protected by alloc_lock */
short il_next;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
int numa_scan_seq;
unsigned int numa_scan_period;
unsigned int numa_scan_period_max;
int numa_preferred_nid;
unsigned long numa_migrate_retry;
u64 node_stamp;
/* migration stamp */
u64 last_task_numa_placement;
u64 last_sum_exec_runtime;
struct callback_head numa_work;
struct list_head numa_entry;
struct numa_group *numa_group;
/*
* numa_faults is an array split into four regions:
* faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buff
* in this precise order.
*
* faults_memory: Exponential decaying average of faults on a per-n
* basis. Scheduling placement decisions are made based on these
* counts. The values remain static for the duration of a PTE scan.
* faults_cpu: Track the nodes the process was running on when a NU
* hinting fault was incurred.

1619
de
1620
nts
1621
d.
1622
1623
1624
1625
1626
1627
1628
n
1629
rent
1630
1631
1632
1633
1634
1635 #endif
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646 #ifdef
1647
1648 #endif
1649 #ifdef
1650
1651 #endif
1652
1653
1654
1655
1656
1657
1658
od */
1659
1660 #ifdef
1661
1662
1663 #endif
1664
1665
1666
1667
1668
1669
1670
1671 #ifdef
1672

* faults_memory_buffer and faults_cpu_buffer: Record faults per no


* during the current scan window. When the scan completes, the cou
* in faults_memory and faults_cpu decay and these values are copie
*/
unsigned long *numa_faults;
unsigned long total_numa_faults;
/*
* numa_faults_locality tracks if faults recorded during the last
* scan window were remote/local or failed to migrate. The task sca
* period is adapted based on the locality of the faults with diffe
* weights depending on whether they were shared or private faults
*/
unsigned long numa_faults_locality[3];
unsigned long numa_pages_migrated;
/* CONFIG_NUMA_BALANCING */
struct rcu_head rcu;
/*
* cache last used pipe for splice
*/
struct pipe_inode_info *splice_pipe;
struct page_frag task_frag;
CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
CONFIG_FAULT_INJECTION
int make_it_fail;
/*
* when (nr_dirtied >= nr_dirtied_pause), it's time to call
* balance_dirty_pages() for some dirty throttling pause
*/
int nr_dirtied;
int nr_dirtied_pause;
unsigned long dirty_paused_when; /* start of a write-and-pause peri
CONFIG_LATENCYTOP
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
/*
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
CONFIG_KASAN
unsigned int kasan_depth;

1673 #endif
1674 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1675
/* Index of current stored address in ret_stack */
1676
int curr_ret_stack;
1677
/* Stack of return addresses for return function tracing */
1678
struct ftrace_ret_stack *ret_stack;
1679
/* time stamp for last schedule */
1680
unsigned long long ftrace_timestamp;
1681
/*
1682
* Number of functions that haven't been traced
1683
* because of depth overrun.
1684
*/
1685
atomic_t trace_overrun;
1686
/* Pause for the tracing */
1687
atomic_t tracing_graph_pause;
1688 #endif
1689 #ifdef CONFIG_TRACING
1690
/* state flags for use by tracers */
1691
unsigned long trace;
1692
/* bitmask and counter of trace recursion */
1693
unsigned long trace_recursion;
1694 #endif /* CONFIG_TRACING */
1695 #ifdef CONFIG_MEMCG
1696
struct memcg_oom_info {
1697
struct mem_cgroup *memcg;
1698
gfp_t gfp_mask;
1699
int order;
1700
unsigned int may_oom:1;
1701
} memcg_oom;
1702 #endif
1703 #ifdef CONFIG_UPROBES
1704
struct uprobe_task *utask;
1705 #endif
1706 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1707
unsigned int
sequential_io;
1708
unsigned int
sequential_io_avg;
1709 #endif
1710 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1711
unsigned long task_state_change;
1712 #endif
1713 };
1714
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

/* task_numa_fault() flag bits. */
#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08
#define TNF_MIGRATE_FAIL 0x10
1723
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
/* NUMA balancing compiled out: all hooks degrade to no-ops. */
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif
static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}
struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 */
1793 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1794
struct pid_namespace *ns);
1795
1796 static inline pid_t task_pid_nr(struct task_struct *tsk)
1797 {
1798
return tsk->pid;
1799 }
1800
1801 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1802
struct pid_namespace *ns)
1803 {
1804
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1805 }
1806
1807 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1808 {
1809
return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1810 }
1811
1812
1813 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1814 {
1815
return tsk->tgid;
1816 }
1817
1818 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1819
1820 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1821 {
1822
return pid_vnr(task_tgid(tsk));
1823 }
1824
1825
1826 static inline int pid_alive(const struct task_struct *p);
1827 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct p
id_namespace *ns)
1828 {
1829
pid_t pid = 0;
1830
1831
rcu_read_lock();
1832
if (pid_alive(tsk))
1833
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns
);
1834
rcu_read_unlock();
1835
1836
return pid;
1837 }
1838
1839 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1840 {
1841
return task_ppid_nr_ns(tsk, &init_pid_ns);
1842 }
1843
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}
/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}
extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct(struct task_struct *t);

1907
1908 static inline void put_task_struct(struct task_struct *t)
1909 {
1910
if (atomic_dec_and_test(&t->usage))
1911
__put_task_struct(t);
1912 }
1913
1914 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1915 extern void task_cputime(struct task_struct *t,
1916
cputime_t *utime, cputime_t *stime);
1917 extern void task_cputime_scaled(struct task_struct *t,
1918
cputime_t *utimescaled, cputime_t *stimesca
led);
1919 extern cputime_t task_gtime(struct task_struct *t);
1920 #else
1921 static inline void task_cputime(struct task_struct *t,
1922
cputime_t *utime, cputime_t *stime)
1923 {
1924
if (utime)
1925
*utime = t->utime;
1926
if (stime)
1927
*stime = t->stime;
1928 }
1929
1930 static inline void task_cputime_scaled(struct task_struct *t,
1931
cputime_t *utimescaled,
1932
cputime_t *stimescaled)
1933 {
1934
if (utimescaled)
1935
*utimescaled = t->utimescaled;
1936
if (stimescaled)
1937
*stimescaled = t->stimescaled;
1938 }
1939
1940 static inline cputime_t task_gtime(struct task_struct *t)
1941 {
1942
return t->gtime;
1943 }
1944 #endif
1945 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cpu
time_t *st);
1946 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t
*ut, cputime_t *st);
/*
 * Per process flags
 */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
2004 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
2005 * __GFP_FS is also cleared as it implies __GFP_IO.
2006 */

2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
*/
2060
2061
2062
2063
2064
*/

static inline gfp_t memalloc_noio_flags(gfp_t flags)


{
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
return flags;
}
static inline unsigned int memalloc_noio_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
current->flags |= PF_MEMALLOC_NOIO;
return flags;
}
static inline void memalloc_noio_restore(unsigned int flags)
{
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0
/* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1
/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2
/* Spread some slab caches over cpuset */
#define TASK_PFA_TEST(name, func)
static inline bool task_##func(struct task_struct *p)
{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)
static inline void task_set_##func(struct task_struct *p)
{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)
static inline void task_clear_##func(struct task_struct *p)
{ clear_bit(PFA_##name, &p->atomic_flags); }

\
\
\
\
\
\

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned int mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned int mask);
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
p->rcu_read_unlock_special.s = 0;
p->rcu_blocked_node = NULL;
INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
p->rcu_tasks_holdout = false;
INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}
static inline void tsk_restore_flags(struct task_struct *task,
unsigned long orig_flags, unsigned long fla
{
task->flags &= ~flags;
task->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask)
{
if (!cpumask_test_cpu(0, new_mask))

2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
k)
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182

return -EINVAL;
return 0;
}
#endif
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mas
{
return set_cpus_allowed_ptr(p, &new_mask);
}
#endif
/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif
#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif
#ifdef
extern
extern
#else
static
#endif

CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void);
u64 scheduler_tick_max_deferment(void);

#ifdef
extern
extern
extern
extern

CONFIG_SCHED_AUTOGROUP
void sched_autogroup_create_attach(struct task_struct *p);
void sched_autogroup_detach(struct task_struct *p);
void sched_autogroup_fork(struct signal_struct *sig);
void sched_autogroup_exit(struct signal_struct *sig);

inline bool sched_can_stop_tick(void) { return false; }

2242 #ifdef CONFIG_PROC_FS


2243 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct se
q_file *m);
2244 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2245 #endif
2246 #else
2247 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2248 static inline void sched_autogroup_detach(struct task_struct *p) { }
2249 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2250 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2251 #endif
2252
2253 extern int yield_to(struct task_struct *p, bool preempt);
2254 extern void set_user_nice(struct task_struct *p, long nice);
2255 extern int task_prio(const struct task_struct *p);
2256 /**
2257 * task_nice - return the nice value of a given task.
2258 * @p: the task in question.
2259 *
2260 * Return: The nice value [ -20 ... 0 ... 19 ].
2261 */
2262 static inline int task_nice(const struct task_struct *p)
2263 {
2264
return PRIO_TO_NICE((p)->static_prio);
2265 }
2266 extern int can_nice(const struct task_struct *p, const int nice);
2267 extern int task_curr(const struct task_struct *p);
2268 extern int idle_cpu(int cpu);
2269 extern int sched_setscheduler(struct task_struct *, int,
2270
const struct sched_param *);
2271 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2272
const struct sched_param *);
2273 extern int sched_setattr(struct task_struct *,
2274
const struct sched_attr *);
2275 extern struct task_struct *idle_task(int cpu);
2276 /**
2277 * is_idle_task - is the specified task an idle task?
2278 * @p: the task in question.
2279 *
2280 * Return: 1 if @p is an idle task. 0 otherwise.
2281 */
2282 static inline bool is_idle_task(const struct task_struct *p)
2283 {
2284
return p->pid == 0;
2285 }
2286 extern struct task_struct *curr_task(int cpu);
2287 extern void set_curr_task(int cpu, struct task_struct *p);
2288
2289 void yield(void);
2290
2291 /*
2292 * The default (Linux) execution domain.
2293 */
2294 extern struct exec_domain
default_exec_domain;
2295
2296 union thread_union {
2297
struct thread_info thread_info;
2298
unsigned long stack[THREAD_SIZE/sizeof(long)];
2299 };
2300

2301 #ifndef __HAVE_ARCH_KSTACK_END


2302 static inline int kstack_end(void *addr)
2303 {
2304
/* Reliable end of stack detection:
2305
* Some APM bios versions misalign the stack
2306
*/
2307
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeo
f(void*)));
2308 }
2309 #endif
2310
2311 extern union thread_union init_thread_union;
2312 extern struct task_struct init_task;
2313
2314 extern struct mm_struct init_mm;
2315
2316 extern struct pid_namespace init_pid_ns;
2317
2318 /*
2319 * find a task by one of its numerical ids
2320 *
2321 * find_task_by_pid_ns():
2322 *
finds a task by its pid in the specified namespace
2323 * find_task_by_vpid():
2324 *
finds a task by its virtual pid
2325 *
2326 * see also find_vpid() etc in include/linux/pid.h
2327 */
2328
2329 extern struct task_struct *find_task_by_vpid(pid_t nr);
2330 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2331
struct pid_namespace *ns);
2332
2333 /* per-UID process charging. */
2334 extern struct user_struct * alloc_uid(kuid_t);
2335 static inline struct user_struct *get_uid(struct user_struct *u)
2336 {
2337
atomic_inc(&u->__count);
2338
return u;
2339 }
2340 extern void free_uid(struct user_struct *);
2341
2342 #include <asm/current.h>
2343
2344 extern void xtime_update(unsigned long ticks);
2345
2346 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2347 extern int wake_up_process(struct task_struct *tsk);
2348 extern void wake_up_new_task(struct task_struct *tsk);
2349 #ifdef CONFIG_SMP
2350 extern void kick_process(struct task_struct *tsk);
2351 #else
2352 static inline void kick_process(struct task_struct *tsk) { }
2353 #endif
2354 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2355 extern void sched_dead(struct task_struct *p);
2356
2357 extern void proc_caches_init(void);
2358 extern void flush_signals(struct task_struct *);
2359 extern void __flush_signals(struct task_struct *);

2360 extern void ignore_signals(struct task_struct *);


2361 extern void flush_signal_handlers(struct task_struct *, int force_default);
2362 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_
t *info);
2363
2364 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *ma
sk, siginfo_t *info)
2365 {
2366
unsigned long flags;
2367
int ret;
2368
2369
spin_lock_irqsave(&tsk->sighand->siglock, flags);
2370
ret = dequeue_signal(tsk, mask, info);
2371
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2372
2373
return ret;
2374 }
2375
2376 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2377
sigset_t *mask);
2378 extern void unblock_all_signals(void);
2379 extern void release_task(struct task_struct * p);
2380 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2381 extern int force_sigsegv(int, struct task_struct *);
2382 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2383 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp
);
2384 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2385 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2386
const struct cred *, u32);
2387 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2388 extern int kill_pid(struct pid *pid, int sig, int priv);
2389 extern int kill_proc_info(int, struct siginfo *, pid_t);
2390 extern __must_check bool do_notify_parent(struct task_struct *, int);
2391 extern void __wake_up_parent(struct task_struct *p, struct task_struct *par
ent);
2392 extern void force_sig(int, struct task_struct *);
2393 extern int send_sig(int, struct task_struct *, int);
2394 extern int zap_other_threads(struct task_struct *p);
2395 extern struct sigqueue *sigqueue_alloc(void);
2396 extern void sigqueue_free(struct sigqueue *);
2397 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int grou
p);
2398 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2399
2400 static inline void restore_saved_sigmask(void)
2401 {
2402
if (test_and_clear_restore_sigmask())
2403
__set_current_blocked(&current->saved_sigmask);
2404 }
2405
2406 static inline sigset_t *sigmask_to_save(void)
2407 {
2408
sigset_t *res = &current->blocked;
2409
if (unlikely(test_restore_sigmask()))
2410
res = &current->saved_sigmask;
2411
return res;
2412 }
2413
2414 static inline int kill_cad_pid(int sig, int priv)

2415 {
2416
return kill_pid(cad_pid, sig, priv);
2417 }
2418
2419 /* These can be the second arg to send_sig_info/send_group_sig_info. */
2420 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2421 #define SEND_SIG_PRIV ((struct siginfo *) 1)
2422 #define SEND_SIG_FORCED ((struct siginfo *) 2)
2423
2424 /*
2425 * True if we are on the alternate signal stack.
2426 */
2427 static inline int on_sig_stack(unsigned long sp)
2428 {
2429 #ifdef CONFIG_STACK_GROWSUP
2430
return sp >= current->sas_ss_sp &&
2431
sp - current->sas_ss_sp < current->sas_ss_size;
2432 #else
2433
return sp > current->sas_ss_sp &&
2434
sp - current->sas_ss_sp <= current->sas_ss_size;
2435 #endif
2436 }
2437
2438 static inline int sas_ss_flags(unsigned long sp)
2439 {
2440
if (!current->sas_ss_size)
2441
return SS_DISABLE;
2442
2443
return on_sig_stack(sp) ? SS_ONSTACK : 0;
2444 }
2445
2446 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2447 {
2448
if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags
(sp))
2449 #ifdef CONFIG_STACK_GROWSUP
2450
return current->sas_ss_sp;
2451 #else
2452
return current->sas_ss_sp + current->sas_ss_size;
2453 #endif
2454
return sp;
2455 }
2456
2457 /*
2458 * Routines for handling mm_structs
2459 */
2460 extern struct mm_struct * mm_alloc(void);
2461
2462 /* mmdrop drops the mm and the page tables */
2463 extern void __mmdrop(struct mm_struct *);
2464 static inline void mmdrop(struct mm_struct * mm)
2465 {
2466
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2467
__mmdrop(mm);
2468 }
2469
2470 /* mmput gets rid of the mappings and all user-space */
2471 extern void mmput(struct mm_struct *);
2472 /* Grab a reference to a task's mm, if it is not already going away */
2473 extern struct mm_struct *get_task_mm(struct task_struct *task);

2474 /*
2475 * Grab a reference to a task's mm, if it is not already going away
2476 * and ptrace_may_access with the mode parameter passed to it
2477 * succeeds.
2478 */
2479 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int m
ode);
2480 /* Remove the current tasks stale references to the old mm_struct */
2481 extern void mm_release(struct task_struct *, struct mm_struct *);
2482
2483 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2484
struct task_struct *);
2485 extern void flush_thread(void);
2486 extern void exit_thread(void);
2487
2488 extern void exit_files(struct task_struct *);
2489 extern void __cleanup_sighand(struct sighand_struct *);
2490
2491 extern void exit_itimers(struct signal_struct *);
2492 extern void flush_itimer_signals(void);
2493
2494 extern void do_group_exit(int);
2495
2496 extern int do_execve(struct filename *,
2497
const char __user * const __user *,
2498
const char __user * const __user *);
2499 extern int do_execveat(int, struct filename *,
2500
const char __user * const __user *,
2501
const char __user * const __user *,
2502
int);
2503 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user
*, int __user *);
2504 struct task_struct *fork_idle(int);
2505 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flag
s);
2506
2507 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool
exec);
2508 static inline void set_task_comm(struct task_struct *tsk, const char *from)
2509 {
2510
__set_task_comm(tsk, from, false);
2511 }
2512 extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
/* UP: a task that isn't current is trivially inactive. */
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif
#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Do to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	/* Tell sparse the lock is held when __lock_task_sighand() succeeded. */
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_lock - lock threadgroup
 * @tsk: member task of the threadgroup to lock
 *
 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
 * change ->group_leader/pid.  This is useful for cases where the threadgroup
 * needs to stay stable across blockable operations.
 *
 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
 * synchronization.  While held, no new task will be added to threadgroup
 * and no existing live task will have its PF_EXITING set.
 *
 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
 * sub-thread becomes a new leader.
 */
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

/**
 * threadgroup_unlock - unlock threadgroup
 * @tsk: member task of the threadgroup to unlock
 *
 * Reverse threadgroup_lock().
 */
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif
#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif
2712 #define task_stack_end_corrupted(task) \
2713
(*(end_of_stack(task)) != STACK_END_MAGIC)
2714
2715 static inline int object_is_on_stack(void *obj)
2716 {
2717
void *stack = task_stack_page(current);
2718
2719
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2720 }
extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
/* Bytes between the end of the stack and the lowest word ever written
 * (found by scanning past the zeroed canary region). */
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

/* Arrange for the current syscall to be transparently restarted. */
static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

/* Should a task sleeping in @state be woken by its pending signals?
 * Interruptible sleeps wake for any signal; killable ones only for SIGKILL. */
static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879

#define cond_resched() ({
\
___might_sleep(__FILE__, __LINE__, 0); \
_cond_resched();
\
})
extern int __cond_resched_lock(spinlock_t *lock);
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET
#else
#define PREEMPT_LOCK_OFFSET
#endif

PREEMPT_OFFSET
0

#define cond_resched_lock(lock) ({
\
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock);
\
})
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({
___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);
__cond_resched_softirq();
})
/*
 * Voluntary preemption point inside an RCU read-side critical section:
 * drops the RCU read lock, offers to reschedule, and reacquires it.
 * Compiled out unless CONFIG_DEBUG_ATOMIC_SLEEP is set or preemptible
 * RCU is absent.  Callers must tolerate the read lock being released.
 */
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
rcu_read_unlock();
cond_resched();
rcu_read_lock();
#endif
}
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
return spin_is_contended(lock);
#else
return 0;
#endif
}
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

/* Non-zero when @p advertises that it polls TIF_NEED_RESCHED while idle. */
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

/* Set the polling flag, then report whether a reschedule is already due. */
static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

/* Clear the polling flag, then report whether a reschedule is already due. */
static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
/* No TIF_POLLING_NRFLAG on this arch: polling state is a no-op. */
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif
/* Leave polling mode and fold any TIF_NEED_RESCHED that raced with us. */
static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
2944
2945 static __always_inline bool need_resched(void)
2946 {
2947
return unlikely(tif_need_resched());
2948 }
2949
2950 /*
2951 * Thread group CPU time accounting.
2952 */
2953 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *tim
es);
2954 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *ti
mes);
2955
2956 static inline void thread_group_cputime_init(struct signal_struct *sig)
2957 {
2958
raw_spin_lock_init(&sig->cputimer.lock);
2959 }
2960
2961 /*
2962 * Reevaluate whether the task has signals pending delivery.
2963 * Wake the task if so.
2964 * This is required every time the blocked sigset_t changes.
2965 * callers must hold sighand->siglock.
2966 */
2967 extern void recalc_sigpending_and_wake(struct task_struct *t);
2968 extern void recalc_sigpending(void);
2969
2970 extern void signal_wake_up_state(struct task_struct *t, unsigned int state)
;
2971
2972 static inline void signal_wake_up(struct task_struct *t, bool resume)
2973 {
2974
signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2975 }
2976 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume
)
2977 {
2978
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2979 }
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

/* CPU the task last ran on / is queued on. */
static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

/* NUMA node of that CPU. */
static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

/* UP: everything runs on CPU 0 and migration is a no-op. */
static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);
#ifdef CONFIG_TASK_XACCT
/* Per-task extended I/O accounting: byte counts and read/write syscall tallies. */
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
/* Accounting disabled: all helpers compile away to no-ops. */
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
/* Per-task user address-space size; arches may override TASK_SIZE_OF. */
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

/* Current (soft) value of resource @limit for @tsk; racy single read. */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

/* Hard maximum of resource @limit for @tsk. */
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

/* Convenience wrappers operating on the current task. */
static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
#endif

Das könnte Ihnen auch gefallen