Contributors: 12

Author                    | Tokens | Token Proportion | Commits | Commit Proportion
------------------------- | ------ | ---------------- | ------- | -----------------
Lv Zheng                  | 127    | 51.42%           | 9       | 33.33%
Sebastian Andrzej Siewior | 62     | 25.10%           | 1       | 3.70%
Suren Baghdasaryan        | 16     | 6.48%            | 2       | 7.41%
Len Brown                 | 9      | 3.64%            | 3       | 11.11%
Rafael J. Wysocki         | 8      | 3.24%            | 1       | 3.70%
Lin Ming                  | 7      | 2.83%            | 1       | 3.70%
Robert Moore              | 6      | 2.43%            | 3       | 11.11%
Linus Torvalds (pre-git)  | 5      | 2.02%            | 3       | 11.11%
Andy Grover               | 3      | 1.21%            | 1       | 3.70%
Thomas Gleixner           | 2      | 0.81%            | 1       | 3.70%
Linus Torvalds            | 1      | 0.40%            | 1       | 3.70%
Erik Schmauss             | 1      | 0.40%            | 1       | 3.70%
Total                     | 247    |                  | 27      |
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#ifndef __ACLINUXEX_H__
#define __ACLINUXEX_H__
#ifdef __KERNEL__
#ifndef ACPI_USE_NATIVE_DIVIDE

/*
 * ACPI_DIV_64_BY_32 - full 64-bit by 32-bit division.
 *
 * Builds a u64 from the high/low 32-bit halves, then uses the kernel's
 * do_div() (which divides in place and returns the remainder) to produce
 * the 32-bit quotient q32 and remainder r32.
 *
 * Note: every argument is parenthesized at its use site, including n_hi
 * inside the cast — (u64)(n_hi) — so that expression arguments such as
 * "a + b" expand correctly.
 */
#ifndef ACPI_DIV_64_BY_32
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
	do { \
		u64 __n = ((u64)(n_hi)) << 32 | (n_lo); \
		(r32) = do_div(__n, (d32)); \
		(q32) = (u32) __n; \
	} while (0)
#endif

/*
 * ACPI_SHIFT_RIGHT_64 - logical right shift by one of a 64-bit value
 * held in two 32-bit halves: the low bit of n_hi moves into the high
 * bit of n_lo.
 */
#ifndef ACPI_SHIFT_RIGHT_64
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
	do { \
		(n_lo) >>= 1; \
		(n_lo) |= (((n_hi) & 1) << 31); \
		(n_hi) >>= 1; \
	} while (0)
#endif

#endif
/*
* Overrides for in-kernel ACPICA
*/
/* OSL init/teardown entry points, overriding the ACPICA defaults. */
acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);
acpi_status acpi_os_terminate(void);
/*
* The irqs_disabled() check is for resume from RAM.
* Interrupts are off during resume, just like they are for boot.
* However, boot has (system_state != SYSTEM_RUNNING)
* to quiet __might_sleep() in kmalloc() and resume does not.
*
* These specialized allocators have to be macros for their allocations to be
* accounted separately (to have separate alloc_tag).
*/
/* General-purpose allocation; atomic when interrupts are off (see above). */
#define acpi_os_allocate(_size) \
kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
/* Same as acpi_os_allocate(), but the returned memory is zeroed. */
#define acpi_os_allocate_zeroed(_size) \
kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
/* Zeroed allocation from an ACPICA object cache. */
#define acpi_os_acquire_object(_cache) \
kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
/*
 * Return a buffer obtained from acpi_os_allocate*() to the kernel heap.
 * kfree() accepts NULL, so no check is needed here.
 */
static inline void acpi_os_free(void *mem)
{
	kfree(mem);
}
static inline acpi_thread_id acpi_os_get_thread_id(void)
{
return (acpi_thread_id) (unsigned long)current;
}
/*
* When lockdep is enabled, the spin_lock_init() macro stringifies it's
* argument and uses that as a name for the lock in debugging.
* By executing spin_lock_init() in a macro the key changes from "lock" for
* all locks to the name of the argument of acpi_os_create_lock(), which
* prevents lockdep from reporting false positives for ACPICA locks.
*/
/*
 * Allocate and initialize a spinlock, storing it through __handle.
 * Kept as a statement-expression macro (not a function) so that lockdep's
 * spin_lock_init() stringifies the caller's own variable name — see the
 * comment above.  Evaluates to AE_OK on success, AE_NO_MEMORY if the
 * allocation fails.
 */
#define acpi_os_create_lock(__handle) \
({ \
spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
if (lock) { \
*(__handle) = lock; \
spin_lock_init(*(__handle)); \
} \
lock ? AE_OK : AE_NO_MEMORY; \
})
/*
 * Raw-spinlock counterpart of acpi_os_create_lock(); a macro for the same
 * lockdep-naming reason.  Evaluates to AE_OK on success, AE_NO_MEMORY if
 * the allocation fails.
 */
#define acpi_os_create_raw_lock(__handle) \
({ \
raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
if (lock) { \
*(__handle) = lock; \
raw_spin_lock_init(*(__handle)); \
} \
lock ? AE_OK : AE_NO_MEMORY; \
})
/*
 * Acquire a raw spinlock with local interrupts disabled.  Returns the
 * saved interrupt flags, to be handed back to acpi_os_release_raw_lock().
 */
static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
{
acpi_cpu_flags flags;
raw_spin_lock_irqsave(lockp, flags);
return flags;
}
/*
 * Release a raw spinlock and restore the interrupt flags returned by the
 * matching acpi_os_acquire_raw_lock() call.
 */
static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
acpi_cpu_flags flags)
{
raw_spin_unlock_irqrestore(lockp, flags);
}
/* Free a raw spinlock created by acpi_os_create_raw_lock(). */
static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
{
ACPI_FREE(handle);
}
/*
 * Report whether a memory region is readable.  In the kernel, addresses
 * handed to ACPICA are unconditionally treated as readable.
 */
static inline u8 acpi_os_readable(void *pointer, acpi_size length)
{
	return TRUE;
}
/* In-kernel ACPICA debugger needs no setup; always succeeds. */
static inline acpi_status acpi_os_initialize_debugger(void)
{
return AE_OK;
}
/* In-kernel ACPICA debugger needs no teardown; nothing to do. */
static inline void acpi_os_terminate_debugger(void)
{
}
/*
* OSL interfaces added by Linux
*/
#endif /* __KERNEL__ */
#endif /* __ACLINUXEX_H__ */