Contributors: 16
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Chris Wilson | 275 | 79.94% | 20 | 48.78% |
| Michał Winiarski | 15 | 4.36% | 3 | 7.32% |
| Nick Hoath | 8 | 2.33% | 1 | 2.44% |
| Imre Deak | 7 | 2.03% | 1 | 2.44% |
| Tvrtko A. Ursulin | 7 | 2.03% | 4 | 9.76% |
| Yaodong Li | 6 | 1.74% | 1 | 2.44% |
| Zou Nan hai | 5 | 1.45% | 1 | 2.44% |
| Matt Roper | 4 | 1.16% | 2 | 4.88% |
| Oscar Mateo | 4 | 1.16% | 1 | 2.44% |
| Jani Nikula | 3 | 0.87% | 1 | 2.44% |
| John Harrison | 3 | 0.87% | 1 | 2.44% |
| Michal Wajdeczko | 2 | 0.58% | 1 | 2.44% |
| Ander Conselvan de Oliveira | 2 | 0.58% | 1 | 2.44% |
| Arun Siluvery | 1 | 0.29% | 1 | 2.44% |
| Mika Kuoppala | 1 | 0.29% | 1 | 2.44% |
| Zhenyu Wang | 1 | 0.29% | 1 | 2.44% |
| Total | 344 | | 41 | |
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef I915_RESET_H
#define I915_RESET_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/srcu.h>

#include "intel_engine_types.h"
#include "intel_reset_types.h"

struct i915_request;
struct intel_engine_cs;
struct intel_gt;
struct intel_guc;

void intel_gt_init_reset(struct intel_gt *gt);
void intel_gt_fini_reset(struct intel_gt *gt);

__printf(4, 5)
void intel_gt_handle_error(struct intel_gt *gt,
                           intel_engine_mask_t engine_mask,
                           unsigned long flags,
                           const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)

void intel_gt_reset(struct intel_gt *gt,
                    intel_engine_mask_t stalled_mask,
                    const char *reason);
int intel_engine_reset(struct intel_engine_cs *engine,
                       const char *reason);
int __intel_engine_reset_bh(struct intel_engine_cs *engine,
                            const char *reason);

void __i915_request_reset(struct i915_request *rq, bool guilty);

int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
int __must_check intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu);
void intel_gt_reset_unlock(struct intel_gt *gt, int tag);

void intel_gt_set_wedged(struct intel_gt *gt);
bool intel_gt_unset_wedged(struct intel_gt *gt);
int intel_gt_terminally_wedged(struct intel_gt *gt);

/*
 * There's no unset_wedged_on_init paired with this one.
 * Once we're wedged on init, there's no going back.
 * Same thing for unset_wedged_on_fini.
 */
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt);

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);

int intel_reset_guc(struct intel_gt *gt);

struct intel_wedge_me {
        struct delayed_work work;
        struct intel_gt *gt;
        const char *name;
};

void __intel_init_wedge(struct intel_wedge_me *w,
                        struct intel_gt *gt,
                        long timeout,
                        const char *name);
void __intel_fini_wedge(struct intel_wedge_me *w);

#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
        for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
             (W)->gt; \
             __intel_fini_wedge((W)))

bool intel_has_gpu_reset(const struct intel_gt *gt);
bool intel_has_reset_engine(const struct intel_gt *gt);

bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt);

#endif /* I915_RESET_H */
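
For context, the SRCU-based reset lock declared above (intel_gt_reset_trylock() and intel_gt_reset_lock_interruptible() hand back a tag through *srcu, which intel_gt_reset_unlock() takes to release the read-side section) is how ordinary hardware access is kept from racing with a GPU reset. The sketch below is not taken from the driver: the include path, the touch_hw() helper and the error handling are illustrative assumptions.

/*
 * Hypothetical caller sketch: enter the reset-protected (SRCU read-side)
 * section, do some work, then drop it using the tag the lock handed back.
 */
#include "intel_reset.h" /* assumed include path for this header */

static int do_work_outside_reset(struct intel_gt *gt)
{
        int srcu, err;

        /* A negative errno means the lock could not be taken. */
        err = intel_gt_reset_lock_interruptible(gt, &srcu);
        if (err)
                return err;

        err = touch_hw(gt); /* hypothetical hardware access */

        /* Release with the tag obtained from the lock call. */
        intel_gt_reset_unlock(gt, srcu);
        return err;
}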
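
Similarly, struct intel_wedge_me and the intel_wedge_on_timeout() macro form a watchdog around a block of code: the macro's for-loop runs its body exactly once, and __intel_init_wedge() appears to arm the delayed work that wedges the GT if the body has not finished before __intel_fini_wedge() cancels it. A minimal sketch follows, assuming an arbitrary 5 second timeout and ALL_ENGINES from intel_engine_types.h; it is not code from the driver.

/*
 * Hypothetical watchdog sketch: run a full GT reset, wedging the device
 * instead if the reset does not complete within the timeout.
 */
#include "intel_reset.h" /* assumed include path for this header */

static void reset_with_watchdog(struct intel_gt *gt)
{
        struct intel_wedge_me w;

        /* 5 * HZ is an example timeout, not a value taken from the driver. */
        intel_wedge_on_timeout(&w, gt, 5 * HZ)
                intel_gt_reset(gt, ALL_ENGINES, "example watchdog reset");
}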