@@ -482,6 +482,13 @@ struct dept_task {
bool in_sched;
};
+/*
+ * for subsystems that require compact use of memory, e.g. struct page
+ */
+struct dept_ext_wgen {
+ unsigned int wgen;
+};
+
#define DEPT_TASK_INITIALIZER(t) \
{ \
.wait_hist = { { .wait = NULL, } }, \
@@ -512,6 +519,7 @@ extern void dept_task_exit(struct task_struct *t);
extern void dept_free_range(void *start, unsigned int sz);
extern void dept_map_init(struct dept_map *m, struct dept_key *k, int sub_u, const char *n);
extern void dept_map_reinit(struct dept_map *m, struct dept_key *k, int sub_u, const char *n);
+extern void dept_ext_wgen_init(struct dept_ext_wgen *ewg);
extern void dept_map_copy(struct dept_map *to, struct dept_map *from);
extern void dept_wait(struct dept_map *m, unsigned long w_f, unsigned long ip, const char *w_fn, int sub_l, long timeout);
@@ -521,8 +529,8 @@ extern void dept_clean_stage(void);
extern void dept_stage_event(struct task_struct *t, unsigned long ip);
extern void dept_ecxt_enter(struct dept_map *m, unsigned long e_f, unsigned long ip, const char *c_fn, const char *e_fn, int sub_l);
extern bool dept_ecxt_holding(struct dept_map *m, unsigned long e_f);
-extern void dept_request_event(struct dept_map *m);
-extern void dept_event(struct dept_map *m, unsigned long e_f, unsigned long ip, const char *e_fn);
+extern void dept_request_event(struct dept_map *m, struct dept_ext_wgen *ewg);
+extern void dept_event(struct dept_map *m, unsigned long e_f, unsigned long ip, const char *e_fn, struct dept_ext_wgen *ewg);
extern void dept_ecxt_exit(struct dept_map *m, unsigned long e_f, unsigned long ip);
extern void dept_sched_enter(void);
extern void dept_sched_exit(void);
@@ -551,6 +559,7 @@ extern void dept_hardirqs_off(void);
struct dept_key { };
struct dept_map { };
struct dept_task { };
+struct dept_ext_wgen { };
#define DEPT_MAP_INITIALIZER(n, k) { }
#define DEPT_TASK_INITIALIZER(t) { }
@@ -563,6 +572,7 @@ struct dept_task { };
#define dept_free_range(s, sz) do { } while (0)
#define dept_map_init(m, k, su, n) do { (void)(n); (void)(k); } while (0)
#define dept_map_reinit(m, k, su, n) do { (void)(n); (void)(k); } while (0)
+#define dept_ext_wgen_init(wg) do { } while (0)
#define dept_map_copy(t, f) do { } while (0)
#define dept_wait(m, w_f, ip, w_fn, sl, t) do { (void)(w_fn); } while (0)
@@ -572,8 +582,8 @@ struct dept_task { };
#define dept_stage_event(t, ip) do { } while (0)
#define dept_ecxt_enter(m, e_f, ip, c_fn, e_fn, sl) do { (void)(c_fn); (void)(e_fn); } while (0)
#define dept_ecxt_holding(m, e_f) false
-#define dept_request_event(m) do { } while (0)
-#define dept_event(m, e_f, ip, e_fn) do { (void)(e_fn); } while (0)
+#define dept_request_event(m, wg) do { } while (0)
+#define dept_event(m, e_f, ip, e_fn, wg) do { (void)(e_fn); } while (0)
#define dept_ecxt_exit(m, e_f, ip) do { } while (0)
#define dept_sched_enter() do { } while (0)
#define dept_sched_exit() do { } while (0)
@@ -24,7 +24,7 @@
#define sdt_wait_timeout(m, t) \
do { \
- dept_request_event(m); \
+ dept_request_event(m, NULL); \
dept_wait(m, 1UL, _THIS_IP_, __func__, 0, t); \
} while (0)
#define sdt_wait(m) sdt_wait_timeout(m, -1L)
@@ -49,7 +49,7 @@
#define sdt_might_sleep_end() dept_clean_stage()
#define sdt_ecxt_enter(m) dept_ecxt_enter(m, 1UL, _THIS_IP_, "start", "event", 0)
-#define sdt_event(m) dept_event(m, 1UL, _THIS_IP_, __func__)
+#define sdt_event(m) dept_event(m, 1UL, _THIS_IP_, __func__, NULL)
#define sdt_ecxt_exit(m) dept_ecxt_exit(m, 1UL, _THIS_IP_)
#else /* !CONFIG_DEPT */
#define sdt_map_init(m) do { } while (0)
@@ -2186,6 +2186,11 @@ void dept_map_reinit(struct dept_map *m, struct dept_key *k, int sub_u,
}
EXPORT_SYMBOL_GPL(dept_map_reinit);
+void dept_ext_wgen_init(struct dept_ext_wgen *ewg)
+{
+ ewg->wgen = 0U;
+}
+
void dept_map_copy(struct dept_map *to, struct dept_map *from)
{
if (unlikely(!dept_working())) {
@@ -2371,7 +2376,7 @@ static void __dept_wait(struct dept_map *m, unsigned long w_f,
*/
static void __dept_event(struct dept_map *m, unsigned long e_f,
unsigned long ip, const char *e_fn,
- bool sched_map)
+ bool sched_map, unsigned int wg)
{
struct dept_class *c;
struct dept_key *k;
@@ -2393,7 +2398,7 @@ static void __dept_event(struct dept_map *m, unsigned long e_f,
c = check_new_class(&m->map_key, k, sub_id(m, e), m->name, sched_map);
if (c && add_ecxt(m, c, 0UL, NULL, e_fn, 0)) {
- do_event(m, c, READ_ONCE(m->wgen), ip);
+ do_event(m, c, wg, ip);
pop_ecxt(m, c);
}
}
@@ -2606,7 +2611,7 @@ void dept_stage_event(struct task_struct *requestor, unsigned long ip)
if (!m.keys)
goto exit;
- __dept_event(&m, 1UL, ip, "try_to_wake_up", sched_map);
+ __dept_event(&m, 1UL, ip, "try_to_wake_up", sched_map, m.wgen);
exit:
dept_exit(flags);
}
@@ -2785,10 +2790,11 @@ bool dept_ecxt_holding(struct dept_map *m, unsigned long e_f)
}
EXPORT_SYMBOL_GPL(dept_ecxt_holding);
-void dept_request_event(struct dept_map *m)
+void dept_request_event(struct dept_map *m, struct dept_ext_wgen *ewg)
{
unsigned long flags;
unsigned int wg;
+ unsigned int *wg_p;
if (unlikely(!dept_working()))
return;
@@ -2801,21 +2807,25 @@ void dept_request_event(struct dept_map *m)
*/
flags = dept_enter_recursive();
+ wg_p = ewg ? &ewg->wgen : &m->wgen;
+
/*
* Avoid zero wgen.
*/
wg = atomic_inc_return(&wgen) ?: atomic_inc_return(&wgen);
- WRITE_ONCE(m->wgen, wg);
+ WRITE_ONCE(*wg_p, wg);
dept_exit_recursive(flags);
}
EXPORT_SYMBOL_GPL(dept_request_event);
void dept_event(struct dept_map *m, unsigned long e_f,
- unsigned long ip, const char *e_fn)
+ unsigned long ip, const char *e_fn,
+ struct dept_ext_wgen *ewg)
{
struct dept_task *dt = dept_task();
unsigned long flags;
+ unsigned int *wg_p;
if (unlikely(!dept_working()))
return;
@@ -2823,24 +2833,26 @@ void dept_event(struct dept_map *m, unsigned long e_f,
if (m->nocheck)
return;
+ wg_p = ewg ? &ewg->wgen : &m->wgen;
+
if (dt->recursive) {
/*
* Dept won't work with this even though an event
* context has been asked. Don't make it confused at
* handling the event. Disable it until the next.
*/
- WRITE_ONCE(m->wgen, 0U);
+ WRITE_ONCE(*wg_p, 0U);
return;
}
flags = dept_enter();
- __dept_event(m, e_f, ip, e_fn, false);
+ __dept_event(m, e_f, ip, e_fn, false, READ_ONCE(*wg_p));
/*
 * Keep the map disabled until the next sleep.
*/
- WRITE_ONCE(m->wgen, 0U);
+ WRITE_ONCE(*wg_p, 0U);
dept_exit(flags);
}
There are cases where the total number of maps for a wait/event is too
large to afford; struct page for PG_locked and PG_writeback is one such
case. If each struct page kept its own map all the way, the additional
memory would be 'the # of pages * sizeof(struct dept_map)', which might
be too big to accept. It'd be better to keep only the minimum data in
that case, namely the timestamp Dept makes use of, called 'wgen'. So
make Dept able to work with an external wgen when needed.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 include/linux/dept.h     | 18 ++++++++++++++----
 include/linux/dept_sdt.h |  4 ++--
 kernel/dependency/dept.c | 30 +++++++++++++++++++++---------
 3 files changed, 37 insertions(+), 15 deletions(-)
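
A minimal usage sketch (not part of the patch), showing how a
size-sensitive subsystem could pair a single shared dept_map with a
per-object dept_ext_wgen. Only dept_map_init(), dept_ext_wgen_init(),
dept_wait(), dept_request_event() and dept_event() are the interfaces
from this patch; the type page_dept_state, the "PG_locked" map/key
names and the call sites are hypothetical:

#include <linux/dept.h>
#include <linux/instruction_pointer.h>	/* _THIS_IP_ */

/* One map and key shared by every page, for the PG_locked class. */
static struct dept_map pg_locked_map;
static struct dept_key pg_locked_key;

/* Per-object state: a 4-byte wgen instead of a whole dept_map. */
struct page_dept_state {
	struct dept_ext_wgen pg_locked_wgen;
};

/* Once at boot: initialize the shared map. */
static void pg_locked_dept_init(void)
{
	dept_map_init(&pg_locked_map, &pg_locked_key, 0, "PG_locked");
}

/* Per object: initialize only the external wgen. */
static void page_dept_state_init(struct page_dept_state *s)
{
	dept_ext_wgen_init(&s->pg_locked_wgen);
}

/* Before sleeping on PG_locked: request the event via the external wgen. */
static void before_wait_pg_locked(struct page_dept_state *s)
{
	dept_request_event(&pg_locked_map, &s->pg_locked_wgen);
	dept_wait(&pg_locked_map, 1UL, _THIS_IP_, __func__, 0, -1L);
}

/* On unlock: report the event against the same external wgen. */
static void on_unlock_page(struct page_dept_state *s)
{
	dept_event(&pg_locked_map, 1UL, _THIS_IP_, __func__,
		   &s->pg_locked_wgen);
}

Passing NULL for the new argument falls back to m->wgen, preserving the
old behaviour; that is exactly what the sdt_wait_timeout() and
sdt_event() macros above do. For scale (illustrative numbers, not from
the patch): 16 GiB of RAM with 4 KiB pages is ~4M struct pages, so a
per-page dept_ext_wgen costs ~16 MiB per tracked bit, whereas per-page
dept_maps would cost '4M * sizeof(struct dept_map)', e.g. ~160 MiB if
the map were 40 bytes.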