Contributors: 16

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Chris Wilson | 268 | 80.00% | 20 | 50.00% |
| Michał Winiarski | 15 | 4.48% | 3 | 7.50% |
| Nick Hoath | 8 | 2.39% | 1 | 2.50% |
| Imre Deak | 7 | 2.09% | 1 | 2.50% |
| Tvrtko A. Ursulin | 7 | 2.09% | 4 | 10.00% |
| Yaodong Li | 6 | 1.79% | 1 | 2.50% |
| Zou Nan hai | 5 | 1.49% | 1 | 2.50% |
| Oscar Mateo | 4 | 1.19% | 1 | 2.50% |
| John Harrison | 3 | 0.90% | 1 | 2.50% |
| Jani Nikula | 3 | 0.90% | 1 | 2.50% |
| Matt Roper | 2 | 0.60% | 1 | 2.50% |
| Michal Wajdeczko | 2 | 0.60% | 1 | 2.50% |
| Ander Conselvan de Oliveira | 2 | 0.60% | 1 | 2.50% |
| Zhenyu Wang | 1 | 0.30% | 1 | 2.50% |
| Mika Kuoppala | 1 | 0.30% | 1 | 2.50% |
| Arun Siluvery | 1 | 0.30% | 1 | 2.50% |
| Total | 335 | | 40 | |
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2008-2018 Intel Corporation
*/
#ifndef I915_RESET_H
#define I915_RESET_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/srcu.h>
#include "intel_engine_types.h"
#include "intel_reset_types.h"
struct i915_request;
struct intel_engine_cs;
struct intel_gt;
struct intel_guc;
/*
 * GT reset interface. Declarations only — behavioural notes below are
 * inferred from names/attributes visible in this header; confirm against
 * the implementation (intel_reset.c) where marked.
 */

/* One-time setup/teardown of the reset machinery for @gt. */
void intel_gt_init_reset(struct intel_gt *gt);
void intel_gt_fini_reset(struct intel_gt *gt);

/*
 * Report a GPU error on the engines in @engine_mask and trigger recovery.
 * @fmt/... form a printf-style reason string; the __printf(4, 5) attribute
 * makes the compiler format-check it (@fmt is parameter 4, varargs at 5).
 */
__printf(4, 5)
void intel_gt_handle_error(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...);

/* @flags bit for intel_gt_handle_error(): presumably requests an error-state
 * capture alongside the reset — TODO confirm in intel_reset.c. */
#define I915_ERROR_CAPTURE BIT(0)

/* Full-GT reset; @stalled_mask marks engines believed to be stuck, @reason
 * is a human-readable explanation for logging. */
void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason);
/*
 * Per-engine reset. NOTE(review): the __..._bh variant's suffix suggests it
 * must be called from bottom-half (softirq/bh-disabled) context — confirm
 * against the implementation.
 */
int intel_engine_reset(struct intel_engine_cs *engine,
const char *reason);
int __intel_engine_reset_bh(struct intel_engine_cs *engine,
const char *reason);

/* Post-reset handling for a single request; @guilty selects guilty vs
 * innocent treatment — TODO confirm exact semantics in intel_reset.c. */
void __i915_request_reset(struct i915_request *rq, bool guilty);

/*
 * Reset lock: on success *@srcu receives a cookie (SRCU read-side, given the
 * <linux/srcu.h> include — confirm) that must be handed back to
 * intel_gt_reset_unlock() as @tag. Results are __must_check: callers may not
 * ignore a failed lock attempt.
 */
int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
int __must_check intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu);
void intel_gt_reset_unlock(struct intel_gt *gt, int tag);

/* Declare the GT broken ("wedged") / attempt to revive it (returns success). */
void intel_gt_set_wedged(struct intel_gt *gt);
bool intel_gt_unset_wedged(struct intel_gt *gt);
/* Non-zero (presumably -errno) when the GT is wedged beyond recovery —
 * TODO confirm return convention. */
int intel_gt_terminally_wedged(struct intel_gt *gt);

/*
 * There's no unset_wedged_on_init paired with this one.
 * Once we're wedged on init, there's no going back.
 * Same thing for unset_wedged_on_fini.
 */
void intel_gt_set_wedged_on_init(struct intel_gt *gt);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt);

/* Low-level hardware reset of the engines selected by @engine_mask. */
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);

/* Reset the GuC microcontroller. */
int intel_reset_guc(struct intel_gt *gt);
/*
 * struct intel_wedge_me - deadline guard, used via intel_wedge_on_timeout().
 *
 * @work: delayed work armed for the guard's timeout; presumably wedges @gt
 *        when it fires — TODO confirm in intel_reset.c.
 * @gt:   the GT being guarded; also serves as the loop condition inside
 *        intel_wedge_on_timeout(), see the macro below.
 * @name: identifier for the guarded section (the macro passes __func__).
 */
struct intel_wedge_me {
struct delayed_work work;
struct intel_gt *gt;
const char *name;
};
/*
 * Arm/disarm an intel_wedge_me guard. Prefer the intel_wedge_on_timeout()
 * wrapper below over calling these directly (the __ prefix marks them as
 * internal plumbing).
 */
void __intel_init_wedge(struct intel_wedge_me *w,
struct intel_gt *gt,
long timeout,
const char *name);
void __intel_fini_wedge(struct intel_wedge_me *w);

/*
 * intel_wedge_on_timeout - run the following statement under a wedge deadline.
 *
 * Expands to a for-loop: the init clause arms @W via __intel_init_wedge()
 * (recording __func__ as the name), the body runs while (W)->gt is non-NULL,
 * and __intel_fini_wedge() is the "increment" executed after the body.
 * NOTE(review): for the body to execute exactly once, __intel_fini_wedge()
 * must clear w->gt — confirm in intel_reset.c.
 */
#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
(W)->gt; \
__intel_fini_wedge((W)))

/* Capability queries: full-GPU reset / individual-engine reset supported? */
bool intel_has_gpu_reset(const struct intel_gt *gt);
bool intel_has_reset_engine(const struct intel_gt *gt);
#endif /* I915_RESET_H */