Contributors: 10
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Chris Wilson | 321 | 72.95% | 14 | 53.85% |
| Andrzej Hajda | 53 | 12.05% | 2 | 7.69% |
| Andi Shyti | 20 | 4.55% | 2 | 7.69% |
| Matthew Brost | 20 | 4.55% | 2 | 7.69% |
| Daniele Ceraolo Spurio | 10 | 2.27% | 1 | 3.85% |
| Tvrtko A. Ursulin | 8 | 1.82% | 1 | 3.85% |
| Matthew Auld | 4 | 0.91% | 1 | 3.85% |
| Arkadiusz Hiler | 2 | 0.45% | 1 | 3.85% |
| Nirmoy Das | 1 | 0.23% | 1 | 3.85% |
| Mauro Carvalho Chehab | 1 | 0.23% | 1 | 3.85% |
| Total | 440 | | 26 | |
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_GT_PM_H
#define INTEL_GT_PM_H

#include <linux/types.h>

#include "intel_gt_types.h"
#include "intel_wakeref.h"

static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
{
	return intel_wakeref_is_active(&gt->wakeref);
}

static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
{
	intel_wakeref_get(&gt->wakeref);
}

static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
{
	intel_gt_pm_get_untracked(gt);
	return intel_wakeref_track(&gt->wakeref);
}

static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
	__intel_wakeref_get(&gt->wakeref);
}

static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
	if (!intel_wakeref_get_if_active(&gt->wakeref))
		return 0;

	return intel_wakeref_track(&gt->wakeref);
}

static inline void intel_gt_pm_might_get(struct intel_gt *gt)
{
	intel_wakeref_might_get(&gt->wakeref);
}

static inline void intel_gt_pm_put_untracked(struct intel_gt *gt)
{
	intel_wakeref_put(&gt->wakeref);
}

static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle)
{
	intel_wakeref_untrack(&gt->wakeref, handle);
	intel_gt_pm_put_untracked(gt);
}

static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt)
{
	intel_wakeref_put_async(&gt->wakeref);
}

static inline void intel_gt_pm_might_put(struct intel_gt *gt)
{
	intel_wakeref_might_put(&gt->wakeref);
}

static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle)
{
	intel_wakeref_untrack(&gt->wakeref, handle);
	intel_gt_pm_put_async_untracked(gt);
}

#define with_intel_gt_pm(gt, wf) \
	for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)

/**
 * with_intel_gt_pm_if_awake - if the GT is PM awake, get a reference to
 *	prevent it from sleeping, run some code and then asynchronously put
 *	the reference away.
 *
 * @gt: pointer to the gt
 * @wf: temporary wakeref that tracks the reference.
 */
#define with_intel_gt_pm_if_awake(gt, wf) \
	for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)

static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
	return intel_wakeref_wait_for_idle(&gt->wakeref);
}

void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_pm_init(struct intel_gt *gt);
void intel_gt_pm_fini(struct intel_gt *gt);

void intel_gt_suspend_prepare(struct intel_gt *gt);
void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
void intel_gt_resume_early(struct intel_gt *gt);

void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);

ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);

static inline bool is_mock_gt(const struct intel_gt *gt)
{
	return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
}

#endif /* INTEL_GT_PM_H */
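
For orientation, a hedged usage sketch follows. It is not part of the header above; it only illustrates how a caller might pair the tracked get/put API with the scoped with_intel_gt_pm() and with_intel_gt_pm_if_awake() helpers. The example_* functions and the touch_hw() helper are hypothetical names invented for this sketch, and it assumes the code lives in an i915/gt source file that includes "intel_gt_pm.h".

/*
 * Illustrative sketch only -- touch_hw() stands in for any work that
 * requires the GT to be awake (register access, submission, etc.).
 */
static void touch_hw(struct intel_gt *gt)
{
	/* ... hardware access goes here ... */
}

static void example_tracked(struct intel_gt *gt)
{
	intel_wakeref_t wf;

	/* Explicit form: every tracked get is paired with a put. */
	wf = intel_gt_pm_get(gt);
	touch_hw(gt);
	intel_gt_pm_put(gt, wf);
}

static void example_scoped(struct intel_gt *gt)
{
	intel_wakeref_t wf;

	/* Scoped form: the reference is dropped when the block exits. */
	with_intel_gt_pm(gt, wf)
		touch_hw(gt);

	/*
	 * Conditional form: the body runs only if the GT is already awake,
	 * and the reference is released asynchronously afterwards.
	 */
	with_intel_gt_pm_if_awake(gt, wf)
		touch_hw(gt);
}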