Merge branch 'acpica' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'acpica' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (22 commits)
  ACPI: fix early DSDT dmi check warnings on ia64
  ACPICA: Update version to 20100428.
  ACPICA: Update/clarify some parameter names associated with acpi_handle
  ACPICA: Rename acpi_ex_system_do_suspend->acpi_ex_system_do_sleep
  ACPICA: Prevent possible allocation overrun during object copy
  ACPICA: Split large file, evgpeblk
  ACPICA: Add GPE support for dynamically loaded ACPI tables
  ACPICA: Clarify/rename some root table descriptor fields
  ACPICA: Update version to 20100331.
  ACPICA: Minimize the differences between linux GPE code and ACPICA code base
  ACPI: add boot option acpi=copy_dsdt to fix corrupt DSDT
  ACPICA: Update DSDT copy/detection.
  ACPICA: Add subsystem option to force copy of DSDT to local memory
  ACPICA: Add detection of corrupted/replaced DSDT
  ACPICA: Add write support for DataTable operation regions
  ACPICA: Fix for acpi_reallocate_root_table for incorrect root table copy
  ACPICA: Update comments/headers, no functional change
  ACPICA: Update version to 20100304
  ACPICA: Fix for possible fault in acpi_ex_release_mutex
  ACPICA: Standardize integer output for ACPICA warnings/errors
  ...
commit 04afb40593
@@ -152,6 +152,7 @@ and is between 256 and 4096 characters. It is defined in the file
			strict -- Be less tolerant of platforms that are not
				strictly ACPI specification compliant.
			rsdt -- prefer RSDT over (default) XSDT
			copy_dsdt -- copy DSDT to memory

			See also Documentation/power/pm.txt, pci=noacpi
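As a usage sketch (the exact bootloader file and syntax vary by distribution, so treat the line below as illustrative only), the new option is simply appended to the kernel command line:

	linux /boot/vmlinuz root=/dev/sda1 ro acpi=copy_dsdt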
@@ -1613,6 +1613,10 @@ static int __init parse_acpi(char *arg)
	/* "acpi=noirq" disables ACPI interrupt routing */
	else if (strcmp(arg, "noirq") == 0) {
		acpi_noirq_set();
	}
	/* "acpi=copy_dsdt" copys DSDT */
	else if (strcmp(arg, "copy_dsdt") == 0) {
		acpi_gbl_copy_dsdt_locally = 1;
	} else {
		/* Core will printk when we return error. */
		return -EINVAL;
@@ -14,12 +14,12 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \

acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
	evmisc.o evrgnini.o evxface.o evxfregn.o \
	evgpe.o evgpeblk.o
	evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o

acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
	exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
	excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
	exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
	exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o

acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
@@ -73,8 +73,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
			     u32 notify_value);

/*
 * evgpe - GPE handling and dispatch
 * evgpe - Low-level GPE support
 */
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);

acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);

@@ -85,19 +87,13 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number);

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block);

/*
 * evgpeblk
 * evgpeblk - Upper-level GPE block support
 */
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);

acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context);

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,

@@ -116,12 +112,37 @@ u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
		     u32 gpe_number);

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
/*
 * evgpeinit - GPE initialization and update
 */
acpi_status acpi_ev_gpe_initialize(void);

void acpi_ev_update_gpes(acpi_owner_id table_owner_id);

acpi_status
acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value);

acpi_status acpi_ev_gpe_initialize(void);
acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *context, void **return_value);

/*
 * evgpeutil - GPE utilities
 */
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);

struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context);

/*
 * evregion - Address Space handling
@@ -112,6 +112,19 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
 */
u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);

/*
 * Optionally enable output from the AML Debug Object.
 */
u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);

/*
 * Optionally copy the entire DSDT to local memory (instead of simply
 * mapping it.) There are some BIOSs that corrupt or replace the original
 * DSDT, creating the need for this option. Default is FALSE, do not copy
 * the DSDT.
 */
u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);

/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */

struct acpi_table_fadt acpi_gbl_FADT;

@@ -145,11 +158,10 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
 ****************************************************************************/

/*
 * acpi_gbl_root_table_list is the master list of ACPI tables found in the
 * RSDT/XSDT.
 *
 * acpi_gbl_root_table_list is the master list of ACPI tables that were
 * found in the RSDT/XSDT.
 */
ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;

/* These addresses are calculated from the FADT Event Block addresses */

@@ -160,6 +172,11 @@ ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;

/* DSDT information. Used to check for DSDT corruption */

ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;

/*
 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
 * determined by the revision of the DSDT: If the DSDT revision is less than
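The acpi_gbl_copy_dsdt_locally flag is consumed by the table manager (acpi_tb_copy_dsdt, declared in the actables.h hunk further down). As a minimal stand-alone sketch of the idea only, not the actual ACPICA routine, and with the allocation and copy macros used in simplified form:

	/*
	 * Illustrative sketch: honor acpi_gbl_copy_dsdt_locally by copying the
	 * mapped DSDT into locally allocated memory (hypothetical helper name).
	 */
	static struct acpi_table_header *copy_dsdt_example(struct acpi_table_header *mapped_dsdt)
	{
		struct acpi_table_header *new_table;

		if (!acpi_gbl_copy_dsdt_locally) {
			return (mapped_dsdt);	/* Default: keep using the mapped DSDT */
		}

		new_table = ACPI_ALLOCATE(mapped_dsdt->length);
		if (!new_table) {
			return (mapped_dsdt);	/* Allocation failed, fall back to the mapping */
		}

		/* Copy the whole table; the saved header (acpi_gbl_original_dsdt_header)
		 * can later be compared against the live DSDT to detect corruption. */
		ACPI_MEMCPY(new_table, mapped_dsdt, mapped_dsdt->length);
		return (new_table);
	}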
@@ -120,6 +120,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
			       union acpi_operand_object **result_desc,
			       struct acpi_walk_state *walk_state);

/*
 * exdebug - AML debug object
 */
void
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
			u32 level, u32 index);

/*
 * exfield - ACPI AML (p-code) execution - field manipulation
 */

@@ -274,7 +281,7 @@ acpi_status
acpi_ex_system_do_notify_op(union acpi_operand_object *value,
			    union acpi_operand_object *obj_desc);

acpi_status acpi_ex_system_do_suspend(u64 time);
acpi_status acpi_ex_system_do_sleep(u64 time);

acpi_status acpi_ex_system_do_stall(u32 time);
@@ -213,12 +213,12 @@ struct acpi_namespace_node {
#define ANOBJ_IS_BIT_OFFSET     0x40	/* i_aSL only: Reference is a bit offset */
#define ANOBJ_IS_REFERENCED     0x80	/* i_aSL only: Object was referenced */

/* One internal RSDT for table management */
/* Internal ACPI table management - master table list */

struct acpi_internal_rsdt {
	struct acpi_table_desc *tables;
	u32 count;
	u32 size;
struct acpi_table_list {
	struct acpi_table_desc *tables;	/* Table descriptor array */
	u32 current_table_count;	/* Tables currently in the array */
	u32 max_table_count;	/* Max tables array will hold */
	u8 flags;
};

@@ -427,8 +427,8 @@ struct acpi_gpe_event_info {
	struct acpi_gpe_register_info *register_info;	/* Backpointer to register info */
	u8 flags;		/* Misc info about this GPE */
	u8 gpe_number;		/* This GPE */
	u8 runtime_count;
	u8 wakeup_count;
	u8 runtime_count;	/* References to a run GPE */
	u8 wakeup_count;	/* References to a wake GPE */
};

/* Information about a GPE register pair, one per each status/enable pair in an array */

@@ -454,6 +454,7 @@ struct acpi_gpe_block_info {
	struct acpi_gpe_event_info *event_info;	/* One for each GPE */
	struct acpi_generic_address block_address;	/* Base address of the block */
	u32 register_count;	/* Number of register pairs in block */
	u16 gpe_count;		/* Number of individual GPEs in block */
	u8 block_base_number;	/* Base GPE number for this block */
};

@@ -469,6 +470,10 @@ struct acpi_gpe_xrupt_info {
struct acpi_gpe_walk_info {
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_block_info *gpe_block;
	u16 count;
	acpi_owner_id owner_id;
	u8 enable_this_gpe;
	u8 execute_by_owner_id;
};

struct acpi_gpe_device_info {
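The rename from count/size to current_table_count/max_table_count makes the occupancy-versus-capacity distinction explicit. A minimal sketch of how such a list might be appended to (hypothetical helper, not ACPICA code; growing the array, as acpi_tb_resize_root_table_list does in ACPICA, is omitted):

	static acpi_status table_list_append_example(struct acpi_table_list *list,
						     struct acpi_table_desc *desc)
	{
		if (list->current_table_count >= list->max_table_count) {
			return (AE_NO_MEMORY);	/* Array full; a real implementation would resize */
		}

		/* Store the descriptor in the next free slot and bump the occupancy count */
		list->tables[list->current_table_count] = *desc;
		list->current_table_count++;
		return (AE_OK);
	}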
@@ -107,6 +107,10 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length);
acpi_status
acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);

void acpi_tb_check_dsdt_header(void);

struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);

void
acpi_tb_install_table(acpi_physical_address address,
		      char *signature, u32 table_index);
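The check behind acpi_tb_check_dsdt_header() pairs with the acpi_gbl_original_dsdt_header global added above: a few header fields saved at boot are compared against the live DSDT header. A simplified sketch of that idea (hypothetical helper name; the real routine also covers more header fields):

	static void check_dsdt_header_example(void)
	{
		/* If the firmware corrupted or replaced the DSDT behind us,
		 * the saved header no longer matches the mapped table. */
		if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
		    acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
			ACPI_ERROR((AE_INFO,
				    "The DSDT has been corrupted or replaced"));
		}
	}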
@@ -323,7 +323,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
	default:

		ACPI_ERROR((AE_INFO,
			    "Invalid opcode in field list: %X",
			    "Invalid opcode in field list: 0x%X",
			    arg->common.aml_opcode));
		return_ACPI_STATUS(AE_AML_BAD_OPCODE);
	}
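This and the following hunks belong to the "Standardize integer output for ACPICA warnings/errors" change: hex values gain an explicit 0x prefix and unsigned quantities move from %d to %u. A tiny stand-alone C sketch (not ACPICA code) of why the distinction matters for a u32 opcode or index:

	#include <stdio.h>

	int main(void)
	{
		unsigned int opcode = 0x8D;
		unsigned int index = 4000000000u;	/* Large unsigned value */

		printf("opcode: %X\n", opcode);		/* "8D" - easy to misread as decimal */
		printf("opcode: 0x%X\n", opcode);	/* "0x8D" - unambiguously hex */

		printf("index %d\n", index);	/* Prints a negative number: wrong for unsigned */
		printf("index %u\n", index);	/* Prints 4000000000 */
		return 0;
	}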
@@ -225,7 +225,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
	    (walk_state->thread->current_sync_level >
	     obj_desc->method.mutex->mutex.sync_level)) {
		ACPI_ERROR((AE_INFO,
			    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
			    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
			    acpi_ut_get_node_name(method_node),
			    walk_state->thread->current_sync_level));
@@ -262,7 +262,7 @@ acpi_ds_method_data_get_node(u8 type,

		if (index > ACPI_METHOD_MAX_LOCAL) {
			ACPI_ERROR((AE_INFO,
				    "Local index %d is invalid (max %d)",
				    "Local index %u is invalid (max %u)",
				    index, ACPI_METHOD_MAX_LOCAL));
			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
		}

@@ -276,7 +276,7 @@ acpi_ds_method_data_get_node(u8 type,

		if (index > ACPI_METHOD_MAX_ARG) {
			ACPI_ERROR((AE_INFO,
				    "Arg index %d is invalid (max %d)",
				    "Arg index %u is invalid (max %u)",
				    index, ACPI_METHOD_MAX_ARG));
			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
		}

@@ -287,7 +287,7 @@ acpi_ds_method_data_get_node(u8 type,
		break;

	default:
		ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
		ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
		return_ACPI_STATUS(AE_TYPE);
	}

@@ -424,7 +424,7 @@ acpi_ds_method_data_get_value(u8 type,
		case ACPI_REFCLASS_ARG:

			ACPI_ERROR((AE_INFO,
				    "Uninitialized Arg[%d] at node %p",
				    "Uninitialized Arg[%u] at node %p",
				    index, node));

			return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);

@@ -440,7 +440,7 @@ acpi_ds_method_data_get_value(u8 type,
		default:

			ACPI_ERROR((AE_INFO,
				    "Not a Arg/Local opcode: %X",
				    "Not a Arg/Local opcode: 0x%X",
				    type));
			return_ACPI_STATUS(AE_AML_INTERNAL);
		}
@@ -288,7 +288,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
	if (byte_list) {
		if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
			ACPI_ERROR((AE_INFO,
				    "Expecting bytelist, got AML opcode %X in op %p",
				    "Expecting bytelist, found AML opcode 0x%X in op %p",
				    byte_list->common.aml_opcode, byte_list));

			acpi_ut_remove_reference(obj_desc);

@@ -511,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
		}

		ACPI_INFO((AE_INFO,
			   "Actual Package length (0x%X) is larger than NumElements field (0x%X), truncated\n",
			   "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
			   i, element_count));
	} else if (i < element_count) {
		/*

@@ -519,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
		 * Note: this is not an error, the package is padded out with NULLs.
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n",
				  "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
				  i, element_count));
	}

@@ -701,7 +701,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
		default:

			ACPI_ERROR((AE_INFO,
				    "Unknown constant opcode %X",
				    "Unknown constant opcode 0x%X",
				    opcode));
			status = AE_AML_OPERAND_TYPE;
			break;

@@ -717,7 +717,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
			break;

		default:
			ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
			ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
				    op_info->type));
			status = AE_AML_OPERAND_TYPE;
			break;

@@ -806,7 +806,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
		default:

			ACPI_ERROR((AE_INFO,
				    "Unimplemented reference type for AML opcode: %4.4X",
				    "Unimplemented reference type for AML opcode: 0x%4.4X",
				    opcode));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

@@ -816,7 +816,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,

	default:

		ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
		ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
			    obj_desc->common.type));

		status = AE_AML_OPERAND_TYPE;
@@ -292,7 +292,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
	node = obj_desc->buffer.node;
	if (!node) {
		ACPI_ERROR((AE_INFO,
			    "No pointer back to NS node in buffer obj %p",
			    "No pointer back to namespace node in buffer object %p",
			    obj_desc));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

@@ -336,7 +336,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
	node = obj_desc->package.node;
	if (!node) {
		ACPI_ERROR((AE_INFO,
			    "No pointer back to NS node in package %p",
			    "No pointer back to namespace node in package %p",
			    obj_desc));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

@@ -580,7 +580,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
	default:

		ACPI_ERROR((AE_INFO,
			    "Unknown field creation opcode %02x", aml_opcode));
			    "Unknown field creation opcode 0x%02X",
			    aml_opcode));
		status = AE_AML_BAD_OPCODE;
		goto cleanup;
	}

@@ -589,7 +590,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,

	if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
		ACPI_ERROR((AE_INFO,
			    "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
			    "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
			    acpi_ut_get_node_name(result_desc),
			    bit_offset + bit_count,
			    acpi_ut_get_node_name(buffer_desc->buffer.node),

@@ -693,7 +694,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
	status = acpi_ex_resolve_operands(op->common.aml_opcode,
					  ACPI_WALK_OPERANDS, walk_state);
	if (ACPI_FAILURE(status)) {
		ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
		ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
			    acpi_ps_get_opcode_name(op->common.aml_opcode),
			    status));

@@ -1461,7 +1462,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,

	default:

		ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
		ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
			    op->common.aml_opcode, op));

		status = AE_AML_BAD_OPCODE;
@@ -140,7 +140,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,

	if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) {
		ACPI_ERROR((AE_INFO,
			    "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
			    "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X",
			    obj_desc, walk_state, obj_desc->common.type));

		status = AE_AML_OPERAND_TYPE;

@@ -354,7 +354,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
	op_class = walk_state->op_info->class;

	if (op_class == AML_CLASS_UNKNOWN) {
		ACPI_ERROR((AE_INFO, "Unknown opcode %X",
		ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X",
			    op->common.aml_opcode));
		return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
	}

@@ -678,7 +678,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
	default:

		ACPI_ERROR((AE_INFO,
			    "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
			    "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
			    op_class, op_type, op->common.aml_opcode,
			    op));
@@ -179,7 +179,7 @@ acpi_ds_result_push(union acpi_operand_object * object,

	if (!object) {
		ACPI_ERROR((AE_INFO,
			    "Null Object! Obj=%p State=%p Num=%X",
			    "Null Object! Obj=%p State=%p Num=%u",
			    object, walk_state, walk_state->result_count));
		return (AE_BAD_PARAMETER);
	}

@@ -223,7 +223,7 @@ static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)

	if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
	    ACPI_RESULTS_OBJ_NUM_MAX) {
		ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
		ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
			    walk_state, walk_state->result_size));
		return (AE_STACK_OVERFLOW);
	}

@@ -314,7 +314,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)

	if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
		ACPI_ERROR((AE_INFO,
			    "Object stack overflow! Obj=%p State=%p #Ops=%X",
			    "Object stack overflow! Obj=%p State=%p #Ops=%u",
			    object, walk_state, walk_state->num_operands));
		return (AE_STACK_OVERFLOW);
	}

@@ -365,7 +365,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)

	if (walk_state->num_operands == 0) {
		ACPI_ERROR((AE_INFO,
			    "Object stack underflow! Count=%X State=%p #Ops=%X",
			    "Object stack underflow! Count=%X State=%p #Ops=%u",
			    pop_count, walk_state,
			    walk_state->num_operands));
		return (AE_STACK_UNDERFLOW);

@@ -377,7 +377,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
		walk_state->operands[walk_state->num_operands] = NULL;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
			  pop_count, walk_state, walk_state->num_operands));

	return (AE_OK);
@@ -302,7 +302,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
					ACPI_DISABLE_EVENT);

		ACPI_ERROR((AE_INFO,
			    "No installed handler for fixed event [%08X]",
			    "No installed handler for fixed event [0x%08X]",
			    event));

		return (ACPI_INTERRUPT_NOT_HANDLED);
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
 *
 * RETURN: Status
 *
 * DESCRIPTION: Updates GPE register enable masks based on the GPE type
 * DESCRIPTION: Updates GPE register enable masks based upon whether there are
 *              references (either wake or run) to this GPE
 *
 ******************************************************************************/

@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
	    (1 <<
	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));

	/* Clear the wake/run bits up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	if (gpe_event_info->runtime_count)
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
	/* Set the mask bits only if there are references to this GPE */

	if (gpe_event_info->wakeup_count)
	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
	}

	if (gpe_event_info->wakeup_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
	}

	return_ACPI_STATUS(AE_OK);
}

@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
 *
 * RETURN: Status
 *
 * DESCRIPTION: Enable a GPE based on the GPE type
 * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
 *              of type or number of references.
 *
 * Note: The GPE lock should be already acquired when this function is called.
 *
 ******************************************************************************/

@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Make sure HW enable masks are updated */

	/*
	 * We will only allow a GPE to be enabled if it has either an
	 * associated method (_Lxx/_Exx) or a handler. Otherwise, the
	 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
	 * first time it fires.
	 */
	if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
		return_ACPI_STATUS(AE_NO_HANDLER);
	}

	/* Ensure the HW enable masks are current */

	status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
	if (ACPI_FAILURE(status))
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear the GPE (of stale events) */

	/* Clear the GPE (of stale events), then enable it */
	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status))
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Enable the requested GPE */

	status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
	return_ACPI_STATUS(status);
}
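The runtime_count/wakeup_count fields turn GPE enabling into reference counting: the enable-mask bit for a GPE stays set while at least one reference of that kind exists. A stand-alone sketch of the idea only (simplified types and a hypothetical helper, not the ACPICA API):

	#include <stdint.h>

	struct gpe_ref_example {
		uint8_t runtime_count;	/* References from runtime users */
		uint8_t enable_for_run;	/* Mirrors the enable-register mask bit */
	};

	/* Acquire a runtime reference; enable hardware on the 0 -> 1 transition */
	static void gpe_ref_get(struct gpe_ref_example *gpe)
	{
		if (gpe->runtime_count++ == 0)
			gpe->enable_for_run = 1;	/* Would write the GPE enable register here */
	}

	/* Drop a runtime reference; disable hardware on the 1 -> 0 transition */
	static void gpe_ref_put(struct gpe_ref_example *gpe)
	{
		if (gpe->runtime_count && --gpe->runtime_count == 0)
			gpe->enable_for_run = 0;
	}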
@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
|
|||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Disable a GPE based on the GPE type
|
||||
* DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
|
||||
* regardless of the type or number of references.
|
||||
*
|
||||
* Note: The GPE lock should be already acquired when this function is called.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
|
@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ev_disable_gpe);
|
||||
|
||||
/* Make sure HW enable masks are updated */
|
||||
|
||||
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
if (ACPI_FAILURE(status))
|
||||
return_ACPI_STATUS(status);
|
||||
|
||||
/*
|
||||
* Even if we don't know the GPE type, make sure that we always
|
||||
* disable it. low_disable_gpe will just clear the enable bit for this
|
||||
* GPE and write it. It will not write out the current GPE enable mask,
|
||||
* since this may inadvertently enable GPEs too early, if a rogue GPE has
|
||||
* come in during ACPICA initialization - possibly as a result of AML or
|
||||
* other code that has enabled the GPE.
|
||||
* Note: Always disable the GPE, even if we think that that it is already
|
||||
* disabled. It is possible that the AML or some other code has enabled
|
||||
* the GPE behind our back.
|
||||
*/
|
||||
|
||||
/* Ensure the HW enable masks are current */
|
||||
|
||||
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* Always H/W disable this GPE, even if we don't know the GPE type.
|
||||
* Simply clear the enable bit for this particular GPE, but do not
|
||||
* write out the current GPE enable mask since this may inadvertently
|
||||
* enable GPEs too early. An example is a rogue GPE that has arrived
|
||||
* during ACPICA initialization - possibly because AML or other code
|
||||
* has enabled the GPE.
|
||||
*/
|
||||
status = acpi_hw_low_disable_gpe(gpe_event_info);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_low_get_gpe_info
|
||||
*
|
||||
* PARAMETERS: gpe_number - Raw GPE number
|
||||
* gpe_block - A GPE info block
|
||||
*
|
||||
* RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
|
||||
* is not within the specified GPE block)
|
||||
*
|
||||
* DESCRIPTION: Returns the event_info struct associated with this GPE. This is
|
||||
* the low-level implementation of ev_get_gpe_event_info.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
|
||||
struct acpi_gpe_block_info
|
||||
*gpe_block)
|
||||
{
|
||||
u32 gpe_index;
|
||||
|
||||
/*
|
||||
* Validate that the gpe_number is within the specified gpe_block.
|
||||
* (Two steps)
|
||||
*/
|
||||
if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
gpe_index = gpe_number - gpe_block->block_base_number;
|
||||
if (gpe_index >= gpe_block->gpe_count) {
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
return (&gpe_block->event_info[gpe_index]);
|
||||
}
|
||||
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_get_gpe_event_info
|
||||
|
@ -184,29 +260,23 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
|
|||
u32 gpe_number)
|
||||
{
|
||||
union acpi_operand_object *obj_desc;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_gpe_event_info *gpe_info;
|
||||
u32 i;
|
||||
|
||||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
/* A NULL gpe_block means use the FADT-defined GPE block(s) */
|
||||
/* A NULL gpe_device means use the FADT-defined GPE block(s) */
|
||||
|
||||
if (!gpe_device) {
|
||||
|
||||
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
|
||||
|
||||
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
|
||||
gpe_block = acpi_gbl_gpe_fadt_blocks[i];
|
||||
if (gpe_block) {
|
||||
if ((gpe_number >= gpe_block->block_base_number)
|
||||
&& (gpe_number <
|
||||
gpe_block->block_base_number +
|
||||
(gpe_block->register_count * 8))) {
|
||||
return (&gpe_block->
|
||||
event_info[gpe_number -
|
||||
gpe_block->
|
||||
block_base_number]);
|
||||
}
|
||||
gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
|
||||
acpi_gbl_gpe_fadt_blocks
|
||||
[i]);
|
||||
if (gpe_info) {
|
||||
return (gpe_info);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
|
|||
return (NULL);
|
||||
}
|
||||
|
||||
gpe_block = obj_desc->device.gpe_block;
|
||||
|
||||
if ((gpe_number >= gpe_block->block_base_number) &&
|
||||
(gpe_number <
|
||||
gpe_block->block_base_number + (gpe_block->register_count * 8))) {
|
||||
return (&gpe_block->
|
||||
event_info[gpe_number - gpe_block->block_base_number]);
|
||||
}
|
||||
|
||||
return (NULL);
|
||||
return (acpi_ev_low_get_gpe_info
|
||||
(gpe_number, obj_desc->device.gpe_block));
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
|
|||
return_VOID;
|
||||
}
|
||||
|
||||
/* Set the GPE flags for return to enabled state */
|
||||
/* Update the GPE register masks for return to enabled state */
|
||||
|
||||
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
|
||||
|
@ -499,7 +561,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
status = acpi_hw_clear_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Unable to clear GPE[%2X]",
|
||||
"Unable to clear GPE[0x%2X]",
|
||||
gpe_number));
|
||||
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
|
||||
}
|
||||
|
@ -532,7 +594,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
status = acpi_hw_clear_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Unable to clear GPE[%2X]",
|
||||
"Unable to clear GPE[0x%2X]",
|
||||
gpe_number));
|
||||
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
|
||||
}
|
||||
|
@ -548,7 +610,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Unable to disable GPE[%2X]",
|
||||
"Unable to disable GPE[0x%2X]",
|
||||
gpe_number));
|
||||
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
|
||||
}
|
||||
|
@ -562,27 +624,30 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
|
|||
gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Unable to queue handler for GPE[%2X] - event disabled",
|
||||
"Unable to queue handler for GPE[0x%2X] - event disabled",
|
||||
gpe_number));
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
/* No handler or method to run! */
|
||||
|
||||
/*
|
||||
* No handler or method to run!
|
||||
* 03/2010: This case should no longer be possible. We will not allow
|
||||
* a GPE to be enabled if it has no handler or method.
|
||||
*/
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"No handler or method for GPE[%2X], disabling event",
|
||||
"No handler or method for GPE[0x%2X], disabling event",
|
||||
gpe_number));
|
||||
|
||||
/*
|
||||
* Disable the GPE. The GPE will remain disabled until the ACPICA
|
||||
* Core Subsystem is restarted, or a handler is installed.
|
||||
* Disable the GPE. The GPE will remain disabled a handler
|
||||
* is installed or ACPICA is restarted.
|
||||
*/
|
||||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Unable to disable GPE[%2X]",
|
||||
"Unable to disable GPE[0x%2X]",
|
||||
gpe_number));
|
||||
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
|
||||
}
|
||||
|
|
|
@ -50,20 +50,6 @@
|
|||
ACPI_MODULE_NAME("evgpeblk")
|
||||
|
||||
/* Local prototypes */
|
||||
static acpi_status
|
||||
acpi_ev_save_method_info(acpi_handle obj_handle,
|
||||
u32 level, void *obj_desc, void **return_value);
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
|
||||
u32 level, void *info, void **return_value);
|
||||
|
||||
static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
|
||||
interrupt_number);
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
|
||||
u32 interrupt_number);
|
||||
|
@ -71,527 +57,6 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
|
|||
static acpi_status
|
||||
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_valid_gpe_event
|
||||
*
|
||||
* PARAMETERS: gpe_event_info - Info for this GPE
|
||||
*
|
||||
* RETURN: TRUE if the gpe_event is valid
|
||||
*
|
||||
* DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
|
||||
* Should be called only when the GPE lists are semaphore locked
|
||||
* and not subject to change.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
|
||||
{
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt_block;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
|
||||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
/* No need for spin lock since we are not changing any list elements */
|
||||
|
||||
/* Walk the GPE interrupt levels */
|
||||
|
||||
gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (gpe_xrupt_block) {
|
||||
gpe_block = gpe_xrupt_block->gpe_block_list_head;
|
||||
|
||||
/* Walk the GPE blocks on this interrupt level */
|
||||
|
||||
while (gpe_block) {
|
||||
if ((&gpe_block->event_info[0] <= gpe_event_info) &&
|
||||
(&gpe_block->event_info[((acpi_size)
|
||||
gpe_block->
|
||||
register_count) * 8] >
|
||||
gpe_event_info)) {
|
||||
return (TRUE);
|
||||
}
|
||||
|
||||
gpe_block = gpe_block->next;
|
||||
}
|
||||
|
||||
gpe_xrupt_block = gpe_xrupt_block->next;
|
||||
}
|
||||
|
||||
return (FALSE);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_walk_gpe_list
|
||||
*
|
||||
* PARAMETERS: gpe_walk_callback - Routine called for each GPE block
|
||||
* Context - Value passed to callback
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Walk the GPE lists.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
|
||||
{
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
|
||||
acpi_status status = AE_OK;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
||||
/* Walk the interrupt level descriptor list */
|
||||
|
||||
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (gpe_xrupt_info) {
|
||||
|
||||
/* Walk all Gpe Blocks attached to this interrupt level */
|
||||
|
||||
gpe_block = gpe_xrupt_info->gpe_block_list_head;
|
||||
while (gpe_block) {
|
||||
|
||||
/* One callback per GPE block */
|
||||
|
||||
status =
|
||||
gpe_walk_callback(gpe_xrupt_info, gpe_block,
|
||||
context);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status == AE_CTRL_END) { /* Callback abort */
|
||||
status = AE_OK;
|
||||
}
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
gpe_block = gpe_block->next;
|
||||
}
|
||||
|
||||
gpe_xrupt_info = gpe_xrupt_info->next;
|
||||
}
|
||||
|
||||
unlock_and_exit:
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_delete_gpe_handlers
|
||||
*
|
||||
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
||||
* gpe_block - Gpe Block info
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Delete all Handler objects found in the GPE data structs.
|
||||
* Used only prior to termination.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
||||
struct acpi_gpe_block_info *gpe_block,
|
||||
void *context)
|
||||
{
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
u32 i;
|
||||
u32 j;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
|
||||
|
||||
/* Examine each GPE Register within the block */
|
||||
|
||||
for (i = 0; i < gpe_block->register_count; i++) {
|
||||
|
||||
/* Now look at the individual GPEs in this byte register */
|
||||
|
||||
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
|
||||
gpe_event_info = &gpe_block->event_info[((acpi_size) i *
|
||||
ACPI_GPE_REGISTER_WIDTH)
|
||||
+ j];
|
||||
|
||||
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
|
||||
ACPI_GPE_DISPATCH_HANDLER) {
|
||||
ACPI_FREE(gpe_event_info->dispatch.handler);
|
||||
gpe_event_info->dispatch.handler = NULL;
|
||||
gpe_event_info->flags &=
|
||||
~ACPI_GPE_DISPATCH_MASK;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_save_method_info
|
||||
*
|
||||
* PARAMETERS: Callback from walk_namespace
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
|
||||
* control method under the _GPE portion of the namespace.
|
||||
* Extract the name and GPE type from the object, saving this
|
||||
* information for quick lookup during GPE dispatch
|
||||
*
|
||||
* The name of each GPE control method is of the form:
|
||||
* "_Lxx" or "_Exx"
|
||||
* Where:
|
||||
* L - means that the GPE is level triggered
|
||||
* E - means that the GPE is edge triggered
|
||||
* xx - is the GPE number [in HEX]
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_save_method_info(acpi_handle obj_handle,
|
||||
u32 level, void *obj_desc, void **return_value)
|
||||
{
|
||||
struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
u32 gpe_number;
|
||||
char name[ACPI_NAME_SIZE + 1];
|
||||
u8 type;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_save_method_info);
|
||||
|
||||
/*
|
||||
* _Lxx and _Exx GPE method support
|
||||
*
|
||||
* 1) Extract the name from the object and convert to a string
|
||||
*/
|
||||
ACPI_MOVE_32_TO_32(name,
|
||||
&((struct acpi_namespace_node *)obj_handle)->name.
|
||||
integer);
|
||||
name[ACPI_NAME_SIZE] = 0;
|
||||
|
||||
/*
|
||||
* 2) Edge/Level determination is based on the 2nd character
|
||||
* of the method name
|
||||
*
|
||||
* NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
|
||||
* if a _PRW object is found that points to this GPE.
|
||||
*/
|
||||
switch (name[1]) {
|
||||
case 'L':
|
||||
type = ACPI_GPE_LEVEL_TRIGGERED;
|
||||
break;
|
||||
|
||||
case 'E':
|
||||
type = ACPI_GPE_EDGE_TRIGGERED;
|
||||
break;
|
||||
|
||||
default:
|
||||
/* Unknown method type, just ignore it! */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
|
||||
"Ignoring unknown GPE method type: %s "
|
||||
"(name not of form _Lxx or _Exx)", name));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Convert the last two characters of the name to the GPE Number */
|
||||
|
||||
gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
|
||||
if (gpe_number == ACPI_UINT32_MAX) {
|
||||
|
||||
/* Conversion failed; invalid method, just ignore it */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
|
||||
"Could not extract GPE number from name: %s "
|
||||
"(name is not of form _Lxx or _Exx)", name));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Ensure that we have a valid GPE number for this GPE block */
|
||||
|
||||
if ((gpe_number < gpe_block->block_base_number) ||
|
||||
(gpe_number >= (gpe_block->block_base_number +
|
||||
(gpe_block->register_count * 8)))) {
|
||||
/*
|
||||
* Not valid for this GPE block, just ignore it. However, it may be
|
||||
* valid for a different GPE block, since GPE0 and GPE1 methods both
|
||||
* appear under \_GPE.
|
||||
*/
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Now we can add this information to the gpe_event_info block for use
|
||||
* during dispatch of this GPE.
|
||||
*/
|
||||
gpe_event_info =
|
||||
&gpe_block->event_info[gpe_number - gpe_block->block_base_number];
|
||||
|
||||
gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
|
||||
|
||||
gpe_event_info->dispatch.method_node =
|
||||
(struct acpi_namespace_node *)obj_handle;
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
|
||||
"Registered GPE method %s as GPE number 0x%.2X\n",
|
||||
name, gpe_number));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_match_prw_and_gpe
|
||||
*
|
||||
* PARAMETERS: Callback from walk_namespace
|
||||
*
|
||||
* RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
|
||||
* not aborted on a single _PRW failure.
|
||||
*
|
||||
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
|
||||
* Device. Run the _PRW method. If present, extract the GPE
|
||||
* number and mark the GPE as a WAKE GPE.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
|
||||
u32 level, void *info, void **return_value)
|
||||
{
|
||||
struct acpi_gpe_walk_info *gpe_info = (void *)info;
|
||||
struct acpi_namespace_node *gpe_device;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_namespace_node *target_gpe_device;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
union acpi_operand_object *pkg_desc;
|
||||
union acpi_operand_object *obj_desc;
|
||||
u32 gpe_number;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
|
||||
|
||||
/* Check for a _PRW method under this device */
|
||||
|
||||
status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
|
||||
ACPI_BTYPE_PACKAGE, &pkg_desc);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
||||
/* Ignore all errors from _PRW, we don't want to abort the subsystem */
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* The returned _PRW package must have at least two elements */
|
||||
|
||||
if (pkg_desc->package.count < 2) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Extract pointers from the input context */
|
||||
|
||||
gpe_device = gpe_info->gpe_device;
|
||||
gpe_block = gpe_info->gpe_block;
|
||||
|
||||
/*
|
||||
* The _PRW object must return a package, we are only interested in the
|
||||
* first element
|
||||
*/
|
||||
obj_desc = pkg_desc->package.elements[0];
|
||||
|
||||
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
|
||||
|
||||
/* Use FADT-defined GPE device (from definition of _PRW) */
|
||||
|
||||
target_gpe_device = acpi_gbl_fadt_gpe_device;
|
||||
|
||||
/* Integer is the GPE number in the FADT described GPE blocks */
|
||||
|
||||
gpe_number = (u32) obj_desc->integer.value;
|
||||
} else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
|
||||
|
||||
/* Package contains a GPE reference and GPE number within a GPE block */
|
||||
|
||||
if ((obj_desc->package.count < 2) ||
|
||||
((obj_desc->package.elements[0])->common.type !=
|
||||
ACPI_TYPE_LOCAL_REFERENCE) ||
|
||||
((obj_desc->package.elements[1])->common.type !=
|
||||
ACPI_TYPE_INTEGER)) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Get GPE block reference and decode */
|
||||
|
||||
target_gpe_device =
|
||||
obj_desc->package.elements[0]->reference.node;
|
||||
gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
|
||||
} else {
|
||||
/* Unknown type, just ignore it */
|
||||
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/*
|
||||
* Is this GPE within this block?
|
||||
*
|
||||
* TRUE if and only if these conditions are true:
|
||||
* 1) The GPE devices match.
|
||||
* 2) The GPE index(number) is within the range of the Gpe Block
|
||||
* associated with the GPE device.
|
||||
*/
|
||||
if ((gpe_device == target_gpe_device) &&
|
||||
(gpe_number >= gpe_block->block_base_number) &&
|
||||
(gpe_number < gpe_block->block_base_number +
|
||||
(gpe_block->register_count * 8))) {
|
||||
gpe_event_info = &gpe_block->event_info[gpe_number -
|
||||
gpe_block->
|
||||
block_base_number];
|
||||
|
||||
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
acpi_ut_remove_reference(pkg_desc);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_get_gpe_xrupt_block
|
||||
*
|
||||
* PARAMETERS: interrupt_number - Interrupt for a GPE block
|
||||
*
|
||||
* RETURN: A GPE interrupt block
|
||||
*
|
||||
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
|
||||
* block per unique interrupt level used for GPEs. Should be
|
||||
* called only when the GPE lists are semaphore locked and not
|
||||
* subject to change.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
|
||||
interrupt_number)
|
||||
{
|
||||
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt;
|
||||
acpi_status status;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
|
||||
|
||||
/* No need for lock since we are not changing any list elements here */
|
||||
|
||||
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (next_gpe_xrupt) {
|
||||
if (next_gpe_xrupt->interrupt_number == interrupt_number) {
|
||||
return_PTR(next_gpe_xrupt);
|
||||
}
|
||||
|
||||
next_gpe_xrupt = next_gpe_xrupt->next;
|
||||
}
|
||||
|
||||
/* Not found, must allocate a new xrupt descriptor */
|
||||
|
||||
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
|
||||
if (!gpe_xrupt) {
|
||||
return_PTR(NULL);
|
||||
}
|
||||
|
||||
gpe_xrupt->interrupt_number = interrupt_number;
|
||||
|
||||
/* Install new interrupt descriptor with spin lock */
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
if (acpi_gbl_gpe_xrupt_list_head) {
|
||||
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (next_gpe_xrupt->next) {
|
||||
next_gpe_xrupt = next_gpe_xrupt->next;
|
||||
}
|
||||
|
||||
next_gpe_xrupt->next = gpe_xrupt;
|
||||
gpe_xrupt->previous = next_gpe_xrupt;
|
||||
} else {
|
||||
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
|
||||
}
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
|
||||
/* Install new interrupt handler if not SCI_INT */
|
||||
|
||||
if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
|
||||
status = acpi_os_install_interrupt_handler(interrupt_number,
|
||||
acpi_ev_gpe_xrupt_handler,
|
||||
gpe_xrupt);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not install GPE interrupt handler at level 0x%X",
|
||||
interrupt_number));
|
||||
return_PTR(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
return_PTR(gpe_xrupt);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_delete_gpe_xrupt
|
||||
*
|
||||
* PARAMETERS: gpe_xrupt - A GPE interrupt info block
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
|
||||
* interrupt handler if not the SCI interrupt.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static acpi_status
|
||||
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
|
||||
|
||||
/* We never want to remove the SCI interrupt handler */
|
||||
|
||||
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
|
||||
gpe_xrupt->gpe_block_list_head = NULL;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Disable this interrupt */
|
||||
|
||||
status =
|
||||
acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
|
||||
acpi_ev_gpe_xrupt_handler);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Unlink the interrupt block with lock */
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
if (gpe_xrupt->previous) {
|
||||
gpe_xrupt->previous->next = gpe_xrupt->next;
|
||||
} else {
|
||||
/* No previous, update list head */
|
||||
|
||||
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
|
||||
}
|
||||
|
||||
if (gpe_xrupt->next) {
|
||||
gpe_xrupt->next->previous = gpe_xrupt->previous;
|
||||
}
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
|
||||
/* Free the block */
|
||||
|
||||
ACPI_FREE(gpe_xrupt);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_install_gpe_block
|
||||
|
@ -705,8 +170,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
|
|||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
}
|
||||
|
||||
acpi_current_gpe_count -=
|
||||
gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
|
||||
acpi_current_gpe_count -= gpe_block->gpe_count;
|
||||
|
||||
/* Free the gpe_block */
|
||||
|
||||
|
@ -760,9 +224,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
|
|||
* Allocate the GPE event_info block. There are eight distinct GPEs
|
||||
* per register. Initialization to zeros is sufficient.
|
||||
*/
|
||||
gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
|
||||
register_count *
|
||||
ACPI_GPE_REGISTER_WIDTH) *
|
||||
gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
|
||||
sizeof(struct
|
||||
acpi_gpe_event_info));
|
||||
if (!gpe_event_info) {
|
||||
|
@ -880,6 +342,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
|
|||
{
|
||||
acpi_status status;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_gpe_walk_info walk_info;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_create_gpe_block);
|
||||
|
||||
|
@ -897,6 +360,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
|
|||
/* Initialize the new GPE block */
|
||||
|
||||
gpe_block->node = gpe_device;
|
||||
gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
|
||||
gpe_block->register_count = register_count;
|
||||
gpe_block->block_base_number = gpe_block_base_number;
|
||||
|
||||
|
@ -921,12 +385,17 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Find all GPE methods (_Lxx, _Exx) for this block */
|
||||
/* Find all GPE methods (_Lxx or_Exx) for this block */
|
||||
|
||||
walk_info.gpe_block = gpe_block;
|
||||
walk_info.gpe_device = gpe_device;
|
||||
walk_info.enable_this_gpe = FALSE;
|
||||
walk_info.execute_by_owner_id = FALSE;
|
||||
|
||||
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
|
||||
ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
|
||||
acpi_ev_save_method_info, NULL,
|
||||
gpe_block, NULL);
|
||||
acpi_ev_match_gpe_method, NULL,
|
||||
&walk_info, NULL);
|
||||
|
||||
/* Return the new block */
|
||||
|
||||
|
@ -938,14 +407,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
|
|||
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
|
||||
(u32) gpe_block->block_base_number,
|
||||
(u32) (gpe_block->block_base_number +
|
||||
((gpe_block->register_count *
|
||||
ACPI_GPE_REGISTER_WIDTH) - 1)),
|
||||
(gpe_block->gpe_count - 1)),
|
||||
gpe_device->name.ascii, gpe_block->register_count,
|
||||
interrupt_number));
|
||||
|
||||
/* Update global count of currently available GPEs */
|
||||
|
||||
acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
|
||||
acpi_current_gpe_count += gpe_block->gpe_count;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
|
@ -969,10 +437,13 @@ acpi_status
|
|||
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
|
||||
struct acpi_gpe_block_info *gpe_block)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
struct acpi_gpe_walk_info gpe_info;
|
||||
struct acpi_gpe_walk_info walk_info;
|
||||
u32 wake_gpe_count;
|
||||
u32 gpe_enabled_count;
|
||||
u32 gpe_index;
|
||||
u32 gpe_number;
|
||||
u32 i;
|
||||
u32 j;
|
||||
|
||||
|
@ -995,210 +466,75 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
|
|||
* definition a wake GPE and will not be enabled while the machine
|
||||
* is running.
|
||||
*/
|
||||
gpe_info.gpe_block = gpe_block;
|
||||
gpe_info.gpe_device = gpe_device;
|
||||
walk_info.gpe_block = gpe_block;
|
||||
walk_info.gpe_device = gpe_device;
|
||||
walk_info.execute_by_owner_id = FALSE;
|
||||
|
||||
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
|
||||
status =
|
||||
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
|
||||
acpi_ev_match_prw_and_gpe, NULL,
|
||||
&gpe_info, NULL);
|
||||
&walk_info, NULL);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"While executing _PRW methods"));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable all GPEs that have a corresponding method and aren't
|
||||
* Enable all GPEs that have a corresponding method and are not
|
||||
* capable of generating wakeups. Any other GPEs within this block
|
||||
* must be enabled via the acpi_enable_gpe() interface.
|
||||
* must be enabled via the acpi_enable_gpe interface.
|
||||
*/
|
||||
wake_gpe_count = 0;
|
||||
gpe_enabled_count = 0;
|
||||
if (gpe_device == acpi_gbl_fadt_gpe_device)
|
||||
|
||||
if (gpe_device == acpi_gbl_fadt_gpe_device) {
|
||||
gpe_device = NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < gpe_block->register_count; i++) {
|
||||
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
|
||||
acpi_status status;
|
||||
acpi_size gpe_index;
|
||||
int gpe_number;
|
||||
|
||||
/* Get the info block for this particular GPE */
|
||||
gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
|
||||
|
||||
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
|
||||
gpe_event_info = &gpe_block->event_info[gpe_index];
|
||||
|
||||
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
|
||||
wake_gpe_count++;
|
||||
if (acpi_gbl_leave_wake_gpes_disabled)
|
||||
if (acpi_gbl_leave_wake_gpes_disabled) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
|
||||
/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
|
||||
|
||||
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Enable this GPE */
|
||||
|
||||
gpe_number = gpe_index + gpe_block->block_base_number;
|
||||
status = acpi_enable_gpe(gpe_device, gpe_number,
|
||||
ACPI_GPE_TYPE_RUNTIME);
|
||||
if (ACPI_FAILURE(status))
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Failed to enable GPE %02X\n",
|
||||
gpe_number));
|
||||
else
|
||||
gpe_enabled_count++;
|
||||
}
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
|
||||
"Found %u Wake, Enabled %u Runtime GPEs in this block\n",
|
||||
wake_gpe_count, gpe_enabled_count));
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_gpe_initialize
|
||||
*
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Initialize the GPE data structures
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_ev_gpe_initialize(void)
|
||||
{
|
||||
u32 register_count0 = 0;
|
||||
u32 register_count1 = 0;
|
||||
u32 gpe_number_max = 0;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the GPE Block(s) defined in the FADT
|
||||
*
|
||||
* Why the GPE register block lengths are divided by 2: From the ACPI
|
||||
* Spec, section "General-Purpose Event Registers", we have:
|
||||
*
|
||||
* "Each register block contains two registers of equal length
|
||||
* GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
|
||||
* GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
|
||||
* The length of the GPE1_STS and GPE1_EN registers is equal to
|
||||
* half the GPE1_LEN. If a generic register block is not supported
|
||||
* then its respective block pointer and block length values in the
|
||||
* FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
|
||||
* to be the same size."
|
||||
*/
|
||||
|
||||
/*
|
||||
* Determine the maximum GPE number for this machine.
|
||||
*
|
||||
* Note: both GPE0 and GPE1 are optional, and either can exist without
|
||||
* the other.
|
||||
*
|
||||
* If EITHER the register length OR the block address are zero, then that
|
||||
* particular block is not supported.
|
||||
*/
|
||||
if (acpi_gbl_FADT.gpe0_block_length &&
|
||||
acpi_gbl_FADT.xgpe0_block.address) {
|
||||
|
||||
/* GPE block 0 exists (has both length and address > 0) */
|
||||
|
||||
register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
|
||||
|
||||
gpe_number_max =
|
||||
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
|
||||
|
||||
/* Install GPE Block 0 */
|
||||
|
||||
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
|
||||
&acpi_gbl_FADT.xgpe0_block,
|
||||
register_count0, 0,
|
||||
acpi_gbl_FADT.sci_interrupt,
|
||||
&acpi_gbl_gpe_fadt_blocks[0]);
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not create GPE Block 0"));
|
||||
}
|
||||
}
|
||||
|
||||
if (acpi_gbl_FADT.gpe1_block_length &&
|
||||
acpi_gbl_FADT.xgpe1_block.address) {
|
||||
|
||||
/* GPE block 1 exists (has both length and address > 0) */
|
||||
|
||||
register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
|
||||
|
||||
/* Check for GPE0/GPE1 overlap (if both banks exist) */
|
||||
|
||||
if ((register_count0) &&
|
||||
(gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
|
||||
"(GPE %d to %d) - Ignoring GPE1",
|
||||
gpe_number_max, acpi_gbl_FADT.gpe1_base,
|
||||
acpi_gbl_FADT.gpe1_base +
|
||||
((register_count1 *
|
||||
ACPI_GPE_REGISTER_WIDTH) - 1)));
|
||||
|
||||
/* Ignore GPE1 block by setting the register count to zero */
|
||||
|
||||
register_count1 = 0;
|
||||
} else {
|
||||
/* Install GPE Block 1 */
|
||||
|
||||
status =
|
||||
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
|
||||
&acpi_gbl_FADT.xgpe1_block,
|
||||
register_count1,
|
||||
acpi_gbl_FADT.gpe1_base,
|
||||
acpi_gbl_FADT.
|
||||
sci_interrupt,
|
||||
&acpi_gbl_gpe_fadt_blocks
|
||||
[1]);
|
||||
|
||||
ACPI_GPE_TYPE_RUNTIME);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not create GPE Block 1"));
|
||||
"Could not enable GPE 0x%02X",
|
||||
gpe_number));
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* GPE0 and GPE1 do not have to be contiguous in the GPE number
|
||||
* space. However, GPE0 always starts at GPE number zero.
|
||||
*/
|
||||
gpe_number_max = acpi_gbl_FADT.gpe1_base +
|
||||
((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
|
||||
gpe_enabled_count++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Exit if there are no GPE registers */
|
||||
|
||||
if ((register_count0 + register_count1) == 0) {
|
||||
|
||||
/* GPEs are not required by ACPI, this is OK */
|
||||
|
||||
if (gpe_enabled_count || wake_gpe_count) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
|
||||
"There are no GPE blocks defined in the FADT\n"));
|
||||
status = AE_OK;
|
||||
goto cleanup;
|
||||
"Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
|
||||
gpe_enabled_count, wake_gpe_count));
|
||||
}
|
||||
|
||||
/* Check for Max GPE number out-of-range */
|
||||
|
||||
if (gpe_number_max > ACPI_GPE_MAX) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Maximum GPE number from FADT is too large: 0x%X",
|
||||
gpe_number_max));
|
||||
status = AE_BAD_VALUE;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
653 drivers/acpi/acpica/evgpeinit.c (new file)

@@ -0,0 +1,653 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* Module Name: evgpeinit - System GPE initialization and update
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2010, Intel Corp.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions, and the following disclaimer,
|
||||
* without modification.
|
||||
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
|
||||
* substantially similar to the "NO WARRANTY" disclaimer below
|
||||
* ("Disclaimer") and any redistribution must be conditioned upon
|
||||
* including a substantially similar Disclaimer requirement for further
|
||||
* binary redistribution.
|
||||
* 3. Neither the names of the above-listed copyright holders nor the names
|
||||
* of any contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* Alternatively, this software may be distributed under the terms of the
|
||||
* GNU General Public License ("GPL") version 2 as published by the Free
|
||||
* Software Foundation.
|
||||
*
|
||||
* NO WARRANTY
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
||||
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGES.
|
||||
*/
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include "accommon.h"
|
||||
#include "acevents.h"
|
||||
#include "acnamesp.h"
|
||||
#include "acinterp.h"
|
||||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evgpeinit")
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_gpe_initialize
|
||||
*
|
||||
* PARAMETERS: None
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_ev_gpe_initialize(void)
|
||||
{
|
||||
u32 register_count0 = 0;
|
||||
u32 register_count1 = 0;
|
||||
u32 gpe_number_max = 0;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the GPE Block(s) defined in the FADT
|
||||
*
|
||||
* Why the GPE register block lengths are divided by 2: From the ACPI
|
||||
* Spec, section "General-Purpose Event Registers", we have:
|
||||
*
|
||||
* "Each register block contains two registers of equal length
|
||||
* GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
|
||||
* GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
|
||||
* The length of the GPE1_STS and GPE1_EN registers is equal to
|
||||
* half the GPE1_LEN. If a generic register block is not supported
|
||||
* then its respective block pointer and block length values in the
|
||||
* FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
|
||||
* to be the same size."
|
||||
*/
|
||||
|
||||
/*
|
||||
* Determine the maximum GPE number for this machine.
|
||||
*
|
||||
* Note: both GPE0 and GPE1 are optional, and either can exist without
|
||||
* the other.
|
||||
*
|
||||
* If EITHER the register length OR the block address are zero, then that
|
||||
* particular block is not supported.
|
||||
*/
|
||||
if (acpi_gbl_FADT.gpe0_block_length &&
|
||||
acpi_gbl_FADT.xgpe0_block.address) {
|
||||
|
||||
/* GPE block 0 exists (has both length and address > 0) */
|
||||
|
||||
register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
|
||||
|
||||
gpe_number_max =
|
||||
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
|
||||
|
||||
/* Install GPE Block 0 */
|
||||
|
||||
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
|
||||
&acpi_gbl_FADT.xgpe0_block,
|
||||
register_count0, 0,
|
||||
acpi_gbl_FADT.sci_interrupt,
|
||||
&acpi_gbl_gpe_fadt_blocks[0]);
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not create GPE Block 0"));
|
||||
}
|
||||
}
|
||||
|
||||
if (acpi_gbl_FADT.gpe1_block_length &&
|
||||
acpi_gbl_FADT.xgpe1_block.address) {
|
||||
|
||||
/* GPE block 1 exists (has both length and address > 0) */
|
||||
|
||||
register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
|
||||
|
||||
/* Check for GPE0/GPE1 overlap (if both banks exist) */
|
||||
|
||||
if ((register_count0) &&
|
||||
(gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
|
||||
"(GPE %u to %u) - Ignoring GPE1",
|
||||
gpe_number_max, acpi_gbl_FADT.gpe1_base,
|
||||
acpi_gbl_FADT.gpe1_base +
|
||||
((register_count1 *
|
||||
ACPI_GPE_REGISTER_WIDTH) - 1)));
|
||||
|
||||
/* Ignore GPE1 block by setting the register count to zero */
|
||||
|
||||
register_count1 = 0;
|
||||
} else {
|
||||
/* Install GPE Block 1 */
|
||||
|
||||
status =
|
||||
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
|
||||
&acpi_gbl_FADT.xgpe1_block,
|
||||
register_count1,
|
||||
acpi_gbl_FADT.gpe1_base,
|
||||
acpi_gbl_FADT.
|
||||
sci_interrupt,
|
||||
&acpi_gbl_gpe_fadt_blocks
|
||||
[1]);
|
||||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not create GPE Block 1"));
|
||||
}
|
||||
|
||||
/*
|
||||
* GPE0 and GPE1 do not have to be contiguous in the GPE number
|
||||
* space. However, GPE0 always starts at GPE number zero.
|
||||
*/
|
||||
gpe_number_max = acpi_gbl_FADT.gpe1_base +
|
||||
((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Exit if there are no GPE registers */
|
||||
|
||||
if ((register_count0 + register_count1) == 0) {
|
||||
|
||||
/* GPEs are not required by ACPI, this is OK */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
|
||||
"There are no GPE blocks defined in the FADT\n"));
|
||||
status = AE_OK;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Check for Max GPE number out-of-range */
|
||||
|
||||
if (gpe_number_max > ACPI_GPE_MAX) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Maximum GPE number from FADT is too large: 0x%X",
|
||||
gpe_number_max));
|
||||
status = AE_BAD_VALUE;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_update_gpes
|
||||
*
|
||||
* PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
|
||||
*
|
||||
* RETURN: None
|
||||
*
|
||||
* DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
|
||||
* result of a Load() or load_table() operation. If new GPE
|
||||
* methods have been installed, register the new methods and
|
||||
* enable and runtime GPEs that are associated with them. Also,
|
||||
* run any newly loaded _PRW methods in order to discover any
|
||||
* new CAN_WAKE GPEs.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
|
||||
{
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_gpe_walk_info walk_info;
|
||||
acpi_status status = AE_OK;
|
||||
u32 new_wake_gpe_count = 0;
|
||||
|
||||
/* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
|
||||
|
||||
walk_info.owner_id = table_owner_id;
|
||||
walk_info.execute_by_owner_id = TRUE;
|
||||
walk_info.count = 0;
|
||||
|
||||
if (acpi_gbl_leave_wake_gpes_disabled) {
|
||||
/*
|
||||
* 1) Run any newly-loaded _PRW methods to find any GPEs that
|
||||
* can now be marked as CAN_WAKE GPEs. Note: We must run the
|
||||
* _PRW methods before we process the _Lxx/_Exx methods because
|
||||
* we will enable all runtime GPEs associated with the new
|
||||
* _Lxx/_Exx methods at the time we process those methods.
|
||||
*
|
||||
* Unlock interpreter so that we can run the _PRW methods.
|
||||
*/
|
||||
walk_info.gpe_block = NULL;
|
||||
walk_info.gpe_device = NULL;
|
||||
|
||||
acpi_ex_exit_interpreter();
|
||||
|
||||
status =
|
||||
acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX,
|
||||
ACPI_NS_WALK_NO_UNLOCK,
|
||||
acpi_ev_match_prw_and_gpe, NULL,
|
||||
&walk_info, NULL);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"While executing _PRW methods"));
|
||||
}
|
||||
|
||||
acpi_ex_enter_interpreter();
|
||||
new_wake_gpe_count = walk_info.count;
|
||||
}
|
||||
|
||||
/*
|
||||
* 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
|
||||
*
|
||||
* Any GPEs that correspond to new _Lxx/_Exx methods and are not
|
||||
* marked as CAN_WAKE are immediately enabled.
|
||||
*
|
||||
* Examine the namespace underneath each gpe_device within the
|
||||
* gpe_block lists.
|
||||
*/
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return;
|
||||
}
|
||||
|
||||
walk_info.count = 0;
|
||||
walk_info.enable_this_gpe = TRUE;
|
||||
|
||||
/* Walk the interrupt level descriptor list */
|
||||
|
||||
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (gpe_xrupt_info) {
|
||||
|
||||
/* Walk all Gpe Blocks attached to this interrupt level */
|
||||
|
||||
gpe_block = gpe_xrupt_info->gpe_block_list_head;
|
||||
while (gpe_block) {
|
||||
walk_info.gpe_block = gpe_block;
|
||||
walk_info.gpe_device = gpe_block->node;
|
||||
|
||||
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
|
||||
walk_info.gpe_device,
|
||||
ACPI_UINT32_MAX,
|
||||
ACPI_NS_WALK_NO_UNLOCK,
|
||||
acpi_ev_match_gpe_method,
|
||||
NULL, &walk_info, NULL);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"While decoding _Lxx/_Exx methods"));
|
||||
}
|
||||
|
||||
gpe_block = gpe_block->next;
|
||||
}
|
||||
|
||||
gpe_xrupt_info = gpe_xrupt_info->next;
|
||||
}
|
||||
|
||||
if (walk_info.count || new_wake_gpe_count) {
|
||||
ACPI_INFO((AE_INFO,
|
||||
"Enabled %u new runtime GPEs, added %u new wakeup GPEs",
|
||||
walk_info.count, new_wake_gpe_count));
|
||||
}
|
||||
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
return;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_match_gpe_method
|
||||
*
|
||||
* PARAMETERS: Callback from walk_namespace
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
|
||||
* control method under the _GPE portion of the namespace.
|
||||
* Extract the name and GPE type from the object, saving this
|
||||
* information for quick lookup during GPE dispatch. Allows a
|
||||
* per-owner_id evaluation if execute_by_owner_id is TRUE in the
|
||||
* walk_info parameter block.
|
||||
*
|
||||
* The name of each GPE control method is of the form:
|
||||
* "_Lxx" or "_Exx", where:
|
||||
* L - means that the GPE is level triggered
|
||||
* E - means that the GPE is edge triggered
|
||||
* xx - is the GPE number [in HEX]
|
||||
*
|
||||
* If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
|
||||
* with that owner.
|
||||
* If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
|
||||
* method is immediately enabled (Used for Load/load_table operators)
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_match_gpe_method(acpi_handle obj_handle,
|
||||
u32 level, void *context, void **return_value)
|
||||
{
|
||||
struct acpi_namespace_node *method_node =
|
||||
ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
|
||||
struct acpi_gpe_walk_info *walk_info =
|
||||
ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
struct acpi_namespace_node *gpe_device;
|
||||
acpi_status status;
|
||||
u32 gpe_number;
|
||||
char name[ACPI_NAME_SIZE + 1];
|
||||
u8 type;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_match_gpe_method);
|
||||
|
||||
/* Check if requested owner_id matches this owner_id */
|
||||
|
||||
if ((walk_info->execute_by_owner_id) &&
|
||||
(method_node->owner_id != walk_info->owner_id)) {
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Match and decode the _Lxx and _Exx GPE method names
|
||||
*
|
||||
* 1) Extract the method name and null terminate it
|
||||
*/
|
||||
ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
|
||||
name[ACPI_NAME_SIZE] = 0;
|
||||
|
||||
/* 2) Name must begin with an underscore */
|
||||
|
||||
if (name[0] != '_') {
|
||||
return_ACPI_STATUS(AE_OK); /* Ignore this method */
|
||||
}
|
||||
|
||||
/*
|
||||
* 3) Edge/Level determination is based on the 2nd character
|
||||
* of the method name
|
||||
*
|
||||
* NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
|
||||
* found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
|
||||
*/
|
||||
switch (name[1]) {
|
||||
case 'L':
|
||||
type = ACPI_GPE_LEVEL_TRIGGERED;
|
||||
break;
|
||||
|
||||
case 'E':
|
||||
type = ACPI_GPE_EDGE_TRIGGERED;
|
||||
break;
|
||||
|
||||
default:
|
||||
/* Unknown method type, just ignore it */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
|
||||
"Ignoring unknown GPE method type: %s "
|
||||
"(name not of form _Lxx or _Exx)", name));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* 4) The last two characters of the name are the hex GPE Number */
|
||||
|
||||
gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
|
||||
if (gpe_number == ACPI_UINT32_MAX) {
|
||||
|
||||
/* Conversion failed; invalid method, just ignore it */
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
|
||||
"Could not extract GPE number from name: %s "
|
||||
"(name is not of form _Lxx or _Exx)", name));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Ensure that we have a valid GPE number for this GPE block */
|
||||
|
||||
gpe_event_info =
|
||||
acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
|
||||
if (!gpe_event_info) {
|
||||
/*
|
||||
* This gpe_number is not valid for this GPE block, just ignore it.
|
||||
* However, it may be valid for a different GPE block, since GPE0
|
||||
* and GPE1 methods both appear under \_GPE.
|
||||
*/
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
|
||||
ACPI_GPE_DISPATCH_HANDLER) {
|
||||
|
||||
/* If there is already a handler, ignore this GPE method */
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
|
||||
ACPI_GPE_DISPATCH_METHOD) {
|
||||
/*
|
||||
* If there is already a method, ignore this method. But check
|
||||
* for a type mismatch (if both the _Lxx AND _Exx exist)
|
||||
*/
|
||||
if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
|
||||
gpe_number, gpe_number, gpe_number));
|
||||
}
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add the GPE information from above to the gpe_event_info block for
|
||||
* use during dispatch of this GPE.
|
||||
*/
|
||||
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
|
||||
gpe_event_info->dispatch.method_node = method_node;
|
||||
|
||||
/*
|
||||
* Enable this GPE if requested. This only happens when during the
|
||||
* execution of a Load or load_table operator. We have found a new
|
||||
* GPE method and want to immediately enable the GPE if it is a
|
||||
* runtime GPE.
|
||||
*/
|
||||
if (walk_info->enable_this_gpe) {
|
||||
|
||||
/* Ignore GPEs that can wake the system */
|
||||
|
||||
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
|
||||
!acpi_gbl_leave_wake_gpes_disabled) {
|
||||
walk_info->count++;
|
||||
gpe_device = walk_info->gpe_device;
|
||||
|
||||
if (gpe_device == acpi_gbl_fadt_gpe_device) {
|
||||
gpe_device = NULL;
|
||||
}
|
||||
|
||||
status = acpi_enable_gpe(gpe_device, gpe_number,
|
||||
ACPI_GPE_TYPE_RUNTIME);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not enable GPE 0x%02X",
|
||||
gpe_number));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
|
||||
"Registered GPE method %s as GPE number 0x%.2X\n",
|
||||
name, gpe_number));
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
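The name matching above (steps 1 through 4) boils down to an underscore, an 'L' or 'E', and two hex digits. A standalone sketch of that decoding, using a hypothetical helper with simplified error handling (the real code checks the conversion result against ACPI_UINT32_MAX):

#include <stdio.h>
#include <stdlib.h>

/* Decode "_Lxx"/"_Exx"; returns the GPE number or -1, sets *level_triggered */
static int decode_gpe_method_name(const char *name, int *level_triggered)
{
	if (name[0] != '_')
		return -1;
	if (name[1] == 'L')
		*level_triggered = 1;
	else if (name[1] == 'E')
		*level_triggered = 0;
	else
		return -1;	/* not a GPE method name */

	return (int)strtoul(&name[2], NULL, 16);	/* last two chars are hex */
}

int main(void)
{
	int level;
	int gpe = decode_gpe_method_name("_L1D", &level);

	printf("GPE 0x%02X, %s triggered\n", gpe, level ? "level" : "edge");
	return 0;
}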
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_match_prw_and_gpe
|
||||
*
|
||||
* PARAMETERS: Callback from walk_namespace
|
||||
*
|
||||
* RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
|
||||
* not aborted on a single _PRW failure.
|
||||
*
|
||||
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
|
||||
* Device. Run the _PRW method. If present, extract the GPE
|
||||
* number and mark the GPE as a CAN_WAKE GPE. Allows a
|
||||
* per-owner_id execution if execute_by_owner_id is TRUE in the
|
||||
* walk_info parameter block.
|
||||
*
|
||||
* If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
|
||||
* owner.
|
||||
* If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
|
||||
* we only execute _PRWs that refer to the input gpe_device.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
|
||||
u32 level, void *context, void **return_value)
|
||||
{
|
||||
struct acpi_gpe_walk_info *walk_info =
|
||||
ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
|
||||
struct acpi_namespace_node *gpe_device;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_namespace_node *target_gpe_device;
|
||||
struct acpi_namespace_node *prw_node;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
union acpi_operand_object *pkg_desc;
|
||||
union acpi_operand_object *obj_desc;
|
||||
u32 gpe_number;
|
||||
acpi_status status;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
|
||||
|
||||
/* Check for a _PRW method under this device */
|
||||
|
||||
status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
|
||||
ACPI_NS_NO_UPSEARCH, &prw_node);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Check if requested owner_id matches this owner_id */
|
||||
|
||||
if ((walk_info->execute_by_owner_id) &&
|
||||
(prw_node->owner_id != walk_info->owner_id)) {
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Execute the _PRW */
|
||||
|
||||
status = acpi_ut_evaluate_object(prw_node, NULL,
|
||||
ACPI_BTYPE_PACKAGE, &pkg_desc);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* The returned _PRW package must have at least two elements */
|
||||
|
||||
if (pkg_desc->package.count < 2) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Extract pointers from the input context */
|
||||
|
||||
gpe_device = walk_info->gpe_device;
|
||||
gpe_block = walk_info->gpe_block;
|
||||
|
||||
/*
|
||||
* The _PRW object must return a package, we are only interested
|
||||
* in the first element
|
||||
*/
|
||||
obj_desc = pkg_desc->package.elements[0];
|
||||
|
||||
if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
|
||||
|
||||
/* Use FADT-defined GPE device (from definition of _PRW) */
|
||||
|
||||
target_gpe_device = NULL;
|
||||
if (gpe_device) {
|
||||
target_gpe_device = acpi_gbl_fadt_gpe_device;
|
||||
}
|
||||
|
||||
/* Integer is the GPE number in the FADT described GPE blocks */
|
||||
|
||||
gpe_number = (u32)obj_desc->integer.value;
|
||||
} else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
|
||||
|
||||
/* Package contains a GPE reference and GPE number within a GPE block */
|
||||
|
||||
if ((obj_desc->package.count < 2) ||
|
||||
((obj_desc->package.elements[0])->common.type !=
|
||||
ACPI_TYPE_LOCAL_REFERENCE) ||
|
||||
((obj_desc->package.elements[1])->common.type !=
|
||||
ACPI_TYPE_INTEGER)) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Get GPE block reference and decode */
|
||||
|
||||
target_gpe_device =
|
||||
obj_desc->package.elements[0]->reference.node;
|
||||
gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
|
||||
} else {
|
||||
/* Unknown type, just ignore it */
|
||||
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Get the gpe_event_info for this GPE */
|
||||
|
||||
if (gpe_device) {
|
||||
/*
|
||||
* Is this GPE within this block?
|
||||
*
|
||||
* TRUE if and only if these conditions are true:
|
||||
* 1) The GPE devices match.
|
||||
* 2) The GPE index(number) is within the range of the Gpe Block
|
||||
* associated with the GPE device.
|
||||
*/
|
||||
if (gpe_device != target_gpe_device) {
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
gpe_event_info =
|
||||
acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
|
||||
} else {
|
||||
/* gpe_device is NULL, just match the target_device and gpe_number */
|
||||
|
||||
gpe_event_info =
|
||||
acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
|
||||
}
|
||||
|
||||
if (gpe_event_info) {
|
||||
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
|
||||
|
||||
/* This GPE can wake the system */
|
||||
|
||||
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
|
||||
walk_info->count++;
|
||||
}
|
||||
}
|
||||
|
||||
cleanup:
|
||||
acpi_ut_remove_reference(pkg_desc);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
337 drivers/acpi/acpica/evgpeutil.c (new file)

@@ -0,0 +1,337 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* Module Name: evgpeutil - GPE utilities
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2010, Intel Corp.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions, and the following disclaimer,
|
||||
* without modification.
|
||||
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
|
||||
* substantially similar to the "NO WARRANTY" disclaimer below
|
||||
* ("Disclaimer") and any redistribution must be conditioned upon
|
||||
* including a substantially similar Disclaimer requirement for further
|
||||
* binary redistribution.
|
||||
* 3. Neither the names of the above-listed copyright holders nor the names
|
||||
* of any contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* Alternatively, this software may be distributed under the terms of the
|
||||
* GNU General Public License ("GPL") version 2 as published by the Free
|
||||
* Software Foundation.
|
||||
*
|
||||
* NO WARRANTY
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
||||
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGES.
|
||||
*/
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include "accommon.h"
|
||||
#include "acevents.h"
|
||||
|
||||
#define _COMPONENT ACPI_EVENTS
|
||||
ACPI_MODULE_NAME("evgpeutil")
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_walk_gpe_list
|
||||
*
|
||||
* PARAMETERS: gpe_walk_callback - Routine called for each GPE block
|
||||
* Context - Value passed to callback
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Walk the GPE lists.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
|
||||
{
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
|
||||
acpi_status status = AE_OK;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
||||
/* Walk the interrupt level descriptor list */
|
||||
|
||||
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (gpe_xrupt_info) {
|
||||
|
||||
/* Walk all Gpe Blocks attached to this interrupt level */
|
||||
|
||||
gpe_block = gpe_xrupt_info->gpe_block_list_head;
|
||||
while (gpe_block) {
|
||||
|
||||
/* One callback per GPE block */
|
||||
|
||||
status =
|
||||
gpe_walk_callback(gpe_xrupt_info, gpe_block,
|
||||
context);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (status == AE_CTRL_END) { /* Callback abort */
|
||||
status = AE_OK;
|
||||
}
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
gpe_block = gpe_block->next;
|
||||
}
|
||||
|
||||
gpe_xrupt_info = gpe_xrupt_info->next;
|
||||
}
|
||||
|
||||
unlock_and_exit:
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_valid_gpe_event
|
||||
*
|
||||
* PARAMETERS: gpe_event_info - Info for this GPE
|
||||
*
|
||||
* RETURN: TRUE if the gpe_event is valid
|
||||
*
|
||||
* DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
|
||||
* Should be called only when the GPE lists are semaphore locked
|
||||
* and not subject to change.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
|
||||
{
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt_block;
|
||||
struct acpi_gpe_block_info *gpe_block;
|
||||
|
||||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
/* No need for spin lock since we are not changing any list elements */
|
||||
|
||||
/* Walk the GPE interrupt levels */
|
||||
|
||||
gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (gpe_xrupt_block) {
|
||||
gpe_block = gpe_xrupt_block->gpe_block_list_head;
|
||||
|
||||
/* Walk the GPE blocks on this interrupt level */
|
||||
|
||||
while (gpe_block) {
|
||||
if ((&gpe_block->event_info[0] <= gpe_event_info) &&
|
||||
(&gpe_block->event_info[gpe_block->gpe_count] >
|
||||
gpe_event_info)) {
|
||||
return (TRUE);
|
||||
}
|
||||
|
||||
gpe_block = gpe_block->next;
|
||||
}
|
||||
|
||||
gpe_xrupt_block = gpe_xrupt_block->next;
|
||||
}
|
||||
|
||||
return (FALSE);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_get_gpe_xrupt_block
|
||||
*
|
||||
* PARAMETERS: interrupt_number - Interrupt for a GPE block
|
||||
*
|
||||
* RETURN: A GPE interrupt block
|
||||
*
|
||||
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
|
||||
* block per unique interrupt level used for GPEs. Should be
|
||||
* called only when the GPE lists are semaphore locked and not
|
||||
* subject to change.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
|
||||
{
|
||||
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
|
||||
struct acpi_gpe_xrupt_info *gpe_xrupt;
|
||||
acpi_status status;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
|
||||
|
||||
/* No need for lock since we are not changing any list elements here */
|
||||
|
||||
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (next_gpe_xrupt) {
|
||||
if (next_gpe_xrupt->interrupt_number == interrupt_number) {
|
||||
return_PTR(next_gpe_xrupt);
|
||||
}
|
||||
|
||||
next_gpe_xrupt = next_gpe_xrupt->next;
|
||||
}
|
||||
|
||||
/* Not found, must allocate a new xrupt descriptor */
|
||||
|
||||
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
|
||||
if (!gpe_xrupt) {
|
||||
return_PTR(NULL);
|
||||
}
|
||||
|
||||
gpe_xrupt->interrupt_number = interrupt_number;
|
||||
|
||||
/* Install new interrupt descriptor with spin lock */
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
if (acpi_gbl_gpe_xrupt_list_head) {
|
||||
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
|
||||
while (next_gpe_xrupt->next) {
|
||||
next_gpe_xrupt = next_gpe_xrupt->next;
|
||||
}
|
||||
|
||||
next_gpe_xrupt->next = gpe_xrupt;
|
||||
gpe_xrupt->previous = next_gpe_xrupt;
|
||||
} else {
|
||||
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
|
||||
}
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
|
||||
/* Install new interrupt handler if not SCI_INT */
|
||||
|
||||
if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
|
||||
status = acpi_os_install_interrupt_handler(interrupt_number,
|
||||
acpi_ev_gpe_xrupt_handler,
|
||||
gpe_xrupt);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not install GPE interrupt handler at level 0x%X",
|
||||
interrupt_number));
|
||||
return_PTR(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
return_PTR(gpe_xrupt);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_delete_gpe_xrupt
|
||||
*
|
||||
* PARAMETERS: gpe_xrupt - A GPE interrupt info block
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
|
||||
* interrupt handler if not the SCI interrupt.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
|
||||
|
||||
/* We never want to remove the SCI interrupt handler */
|
||||
|
||||
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
|
||||
gpe_xrupt->gpe_block_list_head = NULL;
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/* Disable this interrupt */
|
||||
|
||||
status =
|
||||
acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
|
||||
acpi_ev_gpe_xrupt_handler);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Unlink the interrupt block with lock */
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
if (gpe_xrupt->previous) {
|
||||
gpe_xrupt->previous->next = gpe_xrupt->next;
|
||||
} else {
|
||||
/* No previous, update list head */
|
||||
|
||||
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
|
||||
}
|
||||
|
||||
if (gpe_xrupt->next) {
|
||||
gpe_xrupt->next->previous = gpe_xrupt->previous;
|
||||
}
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
|
||||
/* Free the block */
|
||||
|
||||
ACPI_FREE(gpe_xrupt);
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
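The unlink step above is an ordinary doubly-linked-list removal done under the GPE lock. A standalone sketch of just the pointer manipulation, with a hypothetical node type and no locking:

#include <stddef.h>

struct xrupt_node {
	struct xrupt_node *previous;
	struct xrupt_node *next;
};

/* Mirrors the unlink above: fix up neighbours, or move the list head */
static void unlink_node(struct xrupt_node **list_head, struct xrupt_node *node)
{
	if (node->previous)
		node->previous->next = node->next;
	else
		*list_head = node->next;	/* node was the head */

	if (node->next)
		node->next->previous = node->previous;

	node->previous = node->next = NULL;
}

int main(void)
{
	struct xrupt_node a = { NULL, NULL }, b = { NULL, NULL };
	struct xrupt_node *head = &a;

	a.next = &b;
	b.previous = &a;
	unlink_node(&head, &a);		/* head now points at b */
	return head == &b ? 0 : 1;
}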
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ev_delete_gpe_handlers
|
||||
*
|
||||
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
|
||||
* gpe_block - Gpe Block info
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Delete all Handler objects found in the GPE data structs.
|
||||
* Used only prior to termination.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status
|
||||
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
||||
struct acpi_gpe_block_info *gpe_block,
|
||||
void *context)
|
||||
{
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
u32 i;
|
||||
u32 j;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
|
||||
|
||||
/* Examine each GPE Register within the block */
|
||||
|
||||
for (i = 0; i < gpe_block->register_count; i++) {
|
||||
|
||||
/* Now look at the individual GPEs in this byte register */
|
||||
|
||||
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
|
||||
gpe_event_info = &gpe_block->event_info[((acpi_size) i *
|
||||
ACPI_GPE_REGISTER_WIDTH)
|
||||
+ j];
|
||||
|
||||
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
|
||||
ACPI_GPE_DISPATCH_HANDLER) {
|
||||
ACPI_FREE(gpe_event_info->dispatch.handler);
|
||||
gpe_event_info->dispatch.handler = NULL;
|
||||
gpe_event_info->flags &=
|
||||
~ACPI_GPE_DISPATCH_MASK;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
|
@@ -590,7 +590,7 @@ void acpi_ev_terminate(void)
status = acpi_disable_event(i, 0);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not disable fixed event %d",
"Could not disable fixed event %u",
(u32) i));
}
}
@@ -142,7 +142,7 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
event));

/* Remove the handler */
@@ -203,7 +203,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)

if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO,
"Could not write to fixed event enable register %X",
"Could not write to fixed event enable register 0x%X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
|
|||
|
||||
/* Parameter validation */
|
||||
|
||||
if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
|
||||
status = AE_BAD_PARAMETER;
|
||||
goto exit;
|
||||
if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto exit;
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
/* Ensure that we have a valid GPE number */
|
||||
|
@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
|
|||
handler->context = context;
|
||||
handler->method_node = gpe_event_info->dispatch.method_node;
|
||||
|
||||
/* Disable the GPE before installing the handler */
|
||||
|
||||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE (status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
/* Install the handler */
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
|
|||
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
|
||||
unlock_and_exit:
|
||||
unlock_and_exit:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
exit:
|
||||
if (ACPI_FAILURE(status))
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Installing notify handler failed"));
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
|
|
@@ -203,21 +203,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
|
|||
*
|
||||
* FUNCTION: acpi_set_gpe
|
||||
*
|
||||
* PARAMETERS: gpe_device - Parent GPE Device
|
||||
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
|
||||
* gpe_number - GPE level within the GPE block
|
||||
* action - Enable or disable
|
||||
* Called from ISR or not
|
||||
* action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Enable or disable an ACPI event (general purpose)
|
||||
* DESCRIPTION: Enable or disable an individual GPE. This function bypasses
|
||||
* the reference count mechanism used in the acpi_enable_gpe and
|
||||
* acpi_disable_gpe interfaces -- and should be used with care.
|
||||
*
|
||||
* Note: Typically used to disable a runtime GPE for short period of time,
|
||||
* then re-enable it, without disturbing the existing reference counts. This
|
||||
* is useful, for example, in the Embedded Controller (EC) driver.
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
acpi_cpu_flags flags;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
acpi_status status;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_set_gpe);
|
||||
|
||||
|
@@ -243,7 +248,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
|
|||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO, "Invalid action\n"));
|
||||
status = AE_BAD_PARAMETER;
|
||||
break;
|
||||
}
|
||||
|
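For context, the short mask/unmask cycle described above (the Embedded Controller use case) would look roughly like the fragment below. This is an illustrative sketch only, assuming a kernel context with <linux/acpi.h> and an already-known GPE number; it is not code from this commit.

#include <linux/acpi.h>

/* Sketch: temporarily mask a runtime GPE without touching its reference count */
static void poll_with_gpe_masked(u32 gpe_number)
{
	acpi_set_gpe(NULL, gpe_number, ACPI_GPE_DISABLE);	/* NULL device = FADT GPE block */

	/* ... poll or service the device while its GPE is masked ... */

	acpi_set_gpe(NULL, gpe_number, ACPI_GPE_ENABLE);	/* restore; reference counts unchanged */
}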
@@ -259,25 +263,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
|
|||
*
|
||||
* FUNCTION: acpi_enable_gpe
|
||||
*
|
||||
* PARAMETERS: gpe_device - Parent GPE Device
|
||||
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
|
||||
* gpe_number - GPE level within the GPE block
|
||||
* type - Purpose the GPE will be used for
|
||||
* gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
|
||||
* or both
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Take a reference to a GPE and enable it if necessary
|
||||
* DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
|
||||
* hardware-enabled (for runtime GPEs), or the GPE register mask
|
||||
* is updated (for wake GPEs).
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
|
||||
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
acpi_cpu_flags flags;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
|
||||
|
||||
if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
|
||||
/* Parameter validation */
|
||||
|
||||
if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
||||
|
@@ -289,26 +299,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
|
|||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
if (type & ACPI_GPE_TYPE_RUNTIME) {
|
||||
if (++gpe_event_info->runtime_count == 1) {
|
||||
if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
|
||||
if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
|
||||
status = AE_LIMIT; /* Too many references */
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
gpe_event_info->runtime_count++;
|
||||
if (gpe_event_info->runtime_count == 1) {
|
||||
status = acpi_ev_enable_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status))
|
||||
if (ACPI_FAILURE(status)) {
|
||||
gpe_event_info->runtime_count--;
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (type & ACPI_GPE_TYPE_WAKE) {
|
||||
if (gpe_type & ACPI_GPE_TYPE_WAKE) {
|
||||
/* The GPE must have the ability to wake the system */
|
||||
|
||||
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
|
||||
status = AE_BAD_PARAMETER;
|
||||
status = AE_TYPE;
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
|
||||
status = AE_LIMIT; /* Too many references */
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wake-up GPEs are only enabled right prior to putting the
|
||||
* system into a sleep state.
|
||||
* Update the enable mask on the first wakeup reference. Wake GPEs
|
||||
* are only hardware-enabled just before sleeping.
|
||||
*/
|
||||
if (++gpe_event_info->wakeup_count == 1)
|
||||
acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
gpe_event_info->wakeup_count++;
|
||||
if (gpe_event_info->wakeup_count == 1) {
|
||||
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
}
|
||||
}
|
||||
|
||||
unlock_and_exit:
|
||||
|
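As the updated descriptions spell out, acpi_enable_gpe() and acpi_disable_gpe() are now reference-counted per gpe_type, so callers are expected to pair them; only the first and last reference touch the hardware. A hedged usage fragment (again assuming <linux/acpi.h>; the handle and number are placeholders, not taken from this commit):

#include <linux/acpi.h>

static acpi_status take_runtime_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	/* First reference hardware-enables the GPE; later calls only bump the count */
	return acpi_enable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
}

static void drop_runtime_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	/* Dropping the last reference hardware-disables it again */
	acpi_disable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
}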
@@ -321,27 +348,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
|
|||
*
|
||||
* FUNCTION: acpi_disable_gpe
|
||||
*
|
||||
* PARAMETERS: gpe_device - Parent GPE Device
|
||||
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
|
||||
* gpe_number - GPE level within the GPE block
|
||||
* type - Purpose the GPE won't be used for any more
|
||||
* gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
|
||||
* or both
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Release a reference to a GPE and disable it if necessary
|
||||
* DESCRIPTION: Remove a reference to a GPE. When the last reference is
|
||||
* removed, only then is the GPE disabled (for runtime GPEs), or
|
||||
* the GPE mask bit disabled (for wake GPEs)
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
|
||||
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
acpi_cpu_flags flags;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
|
||||
|
||||
if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
|
||||
/* Parameter validation */
|
||||
|
||||
if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
||||
/* Ensure that we have a valid GPE number */
|
||||
|
||||
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
|
||||
|
@@ -350,18 +384,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
|
|||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
|
||||
if (--gpe_event_info->runtime_count == 0)
|
||||
/* Hardware-disable a runtime GPE on removal of the last reference */
|
||||
|
||||
if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
|
||||
if (!gpe_event_info->runtime_count) {
|
||||
status = AE_LIMIT; /* There are no references to remove */
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
gpe_event_info->runtime_count--;
|
||||
if (!gpe_event_info->runtime_count) {
|
||||
status = acpi_ev_disable_gpe(gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
gpe_event_info->runtime_count++;
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
|
||||
/*
|
||||
* Wake-up GPEs are not enabled after leaving system sleep
|
||||
* states, so we don't need to disable them here.
|
||||
*/
|
||||
if (--gpe_event_info->wakeup_count == 0)
|
||||
acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
/*
|
||||
* Update masks for wake GPE on removal of the last reference.
|
||||
* No need to hardware-disable wake GPEs here, they are not currently
|
||||
* enabled.
|
||||
*/
|
||||
if (gpe_type & ACPI_GPE_TYPE_WAKE) {
|
||||
if (!gpe_event_info->wakeup_count) {
|
||||
status = AE_LIMIT; /* There are no references to remove */
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
gpe_event_info->wakeup_count--;
|
||||
if (!gpe_event_info->wakeup_count) {
|
||||
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
|
||||
}
|
||||
}
|
||||
|
||||
unlock_and_exit:
|
||||
|
@@ -465,30 +520,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
|
|||
*
|
||||
* FUNCTION: acpi_clear_gpe
|
||||
*
|
||||
* PARAMETERS: gpe_device - Parent GPE Device
|
||||
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
|
||||
* gpe_number - GPE level within the GPE block
|
||||
* Flags - Called from an ISR or not
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
* DESCRIPTION: Clear an ACPI event (general purpose)
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
|
||||
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
|
||||
|
||||
/* Use semaphore lock if not executing at interrupt level */
|
||||
|
||||
if (flags & ACPI_NOT_ISR) {
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
||||
/* Ensure that we have a valid GPE number */
|
||||
|
||||
|
@@ -501,9 +549,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
|
|||
status = acpi_hw_clear_gpe(gpe_event_info);
|
||||
|
||||
unlock_and_exit:
|
||||
if (flags & ACPI_NOT_ISR) {
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
}
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@@ -569,9 +615,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
|
|||
*
|
||||
* FUNCTION: acpi_get_gpe_status
|
||||
*
|
||||
* PARAMETERS: gpe_device - Parent GPE Device
|
||||
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
|
||||
* gpe_number - GPE level within the GPE block
|
||||
* Flags - Called from an ISR or not
|
||||
* event_status - Where the current status of the event will
|
||||
* be returned
|
||||
*
|
||||
|
@@ -582,21 +627,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
|
|||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_get_gpe_status(acpi_handle gpe_device,
|
||||
u32 gpe_number, u32 flags, acpi_event_status * event_status)
|
||||
u32 gpe_number, acpi_event_status *event_status)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
struct acpi_gpe_event_info *gpe_event_info;
|
||||
acpi_cpu_flags flags;
|
||||
|
||||
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
|
||||
|
||||
/* Use semaphore lock if not executing at interrupt level */
|
||||
|
||||
if (flags & ACPI_NOT_ISR) {
|
||||
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
|
||||
|
||||
/* Ensure that we have a valid GPE number */
|
||||
|
||||
|
@@ -614,9 +653,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
|
|||
*event_status |= ACPI_EVENT_FLAG_HANDLE;
|
||||
|
||||
unlock_and_exit:
|
||||
if (flags & ACPI_NOT_ISR) {
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
|
||||
}
|
||||
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
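With the mutex-to-spinlock conversion, acpi_clear_gpe() and acpi_get_gpe_status() also lose their ISR/flags argument, so a status check-and-clear now reads as below. Illustrative sketch only; it assumes <linux/acpi.h>, and ACPI_EVENT_FLAG_SET is assumed to be the relevant status bit (it is not shown in this hunk).

#include <linux/acpi.h>

static void ack_gpe_if_set(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_event_status status = 0;

	if (ACPI_FAILURE(acpi_get_gpe_status(gpe_device, gpe_number, &status)))
		return;

	if (status & ACPI_EVENT_FLAG_SET)	/* assumed status-bit flag */
		acpi_clear_gpe(gpe_device, gpe_number);
}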
@@ -673,20 +710,15 @@ acpi_install_gpe_block(acpi_handle gpe_device,
|
|||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
/* Run the _PRW methods and enable the GPEs */
|
||||
|
||||
status = acpi_ev_initialize_gpe_block(node, gpe_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
}
|
||||
|
||||
/* Get the device_object attached to the node */
|
||||
/* Install block in the device_object attached to the node */
|
||||
|
||||
obj_desc = acpi_ns_get_attached_object(node);
|
||||
if (!obj_desc) {
|
||||
|
||||
/* No object, create a new one */
|
||||
|
||||
/*
|
||||
* No object, create a new one (Device nodes do not always have
|
||||
* an attached object)
|
||||
*/
|
||||
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
|
||||
if (!obj_desc) {
|
||||
status = AE_NO_MEMORY;
|
||||
|
@@ -705,10 +737,14 @@ acpi_install_gpe_block(acpi_handle gpe_device,
|
|||
}
|
||||
}
|
||||
|
||||
/* Install the GPE block in the device_object */
|
||||
/* Now install the GPE block in the device_object */
|
||||
|
||||
obj_desc->device.gpe_block = gpe_block;
|
||||
|
||||
/* Run the _PRW methods and enable the runtime GPEs in the new block */
|
||||
|
||||
status = acpi_ev_initialize_gpe_block(node, gpe_block);
|
||||
|
||||
unlock_and_exit:
|
||||
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
|
||||
return_ACPI_STATUS(status);
|
||||
|
@@ -839,8 +875,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
|
|||
|
||||
/* Increment Index by the number of GPEs in this block */
|
||||
|
||||
info->next_block_base_index +=
|
||||
(gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
|
||||
info->next_block_base_index += gpe_block->gpe_count;
|
||||
|
||||
if (info->index < info->next_block_base_index) {
|
||||
/*
|
||||
|
|
|
@@ -82,8 +82,9 @@ acpi_ex_add_table(u32 table_index,
struct acpi_namespace_node *parent_node,
union acpi_operand_object **ddb_handle)
{
acpi_status status;
union acpi_operand_object *obj_desc;
acpi_status status;
acpi_owner_id owner_id;

ACPI_FUNCTION_TRACE(ex_add_table);
@ -119,7 +120,14 @@ acpi_ex_add_table(u32 table_index,
|
|||
acpi_ns_exec_module_code_list();
|
||||
acpi_ex_enter_interpreter();
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
/* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
|
||||
|
||||
status = acpi_tb_get_owner_id(table_index, &owner_id);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
acpi_ev_update_gpes(owner_id);
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
@ -248,10 +256,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
|
|||
|
||||
status = acpi_get_table_by_index(table_index, &table);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
ACPI_INFO((AE_INFO,
|
||||
"Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
|
||||
table->signature, table->oem_id,
|
||||
table->oem_table_id));
|
||||
ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
|
||||
acpi_tb_print_table_header(0, table);
|
||||
}
|
||||
|
||||
/* Invoke table handler if present */
|
||||
|
@ -525,6 +531,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
|
|||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
|
||||
acpi_tb_print_table_header(0, table_desc.pointer);
|
||||
|
||||
/* Remove the reference by added by acpi_ex_store above */
|
||||
|
||||
acpi_ut_remove_reference(ddb_handle);
|
||||
|
|
|
@ -650,7 +650,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,

		default:
			ACPI_ERROR((AE_INFO,
				    "Bad destination type during conversion: %X",
				    "Bad destination type during conversion: 0x%X",
				    destination_type));
			status = AE_AML_INTERNAL;
			break;

@ -665,7 +665,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,

	default:
		ACPI_ERROR((AE_INFO,
			    "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
			    "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
			    GET_CURRENT_ARG_TYPE(walk_state->op_info->
						 runtime_args),
			    walk_state->opcode,

@ -306,12 +306,12 @@ acpi_ex_create_region(u8 * aml_start,
	 */
	if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
	    (region_space < ACPI_USER_REGION_BEGIN)) {
		ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
		ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
			    region_space));
		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
			  acpi_ut_get_region_name(region_space), region_space));

	/* Create the region descriptor */

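The hunks above belong to this series' standardization of integer output in ACPICA warnings and errors: bare "%X" hex values gain a "0x" prefix so they cannot be misread as decimal. A standalone illustration in plain C (not ACPICA code; the value is made up):

/* Why the 0x prefix matters: a bare "%X" renders 16 as "10",
 * which reads like decimal ten in a log. */
#include <stdio.h>

int main(void)
{
	unsigned int region_space = 16;	/* example value only */

	printf("Invalid AddressSpace type %X\n", region_space);	/* old: "10" */
	printf("Invalid AddressSpace type 0x%X\n", region_space);	/* new: "0x10" */
	return 0;
}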
drivers/acpi/acpica/exdebug.c (new file, 261 lines added)
@ -0,0 +1,261 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* Module Name: exdebug - Support for stores to the AML Debug Object
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2000 - 2010, Intel Corp.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions, and the following disclaimer,
|
||||
* without modification.
|
||||
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
|
||||
* substantially similar to the "NO WARRANTY" disclaimer below
|
||||
* ("Disclaimer") and any redistribution must be conditioned upon
|
||||
* including a substantially similar Disclaimer requirement for further
|
||||
* binary redistribution.
|
||||
* 3. Neither the names of the above-listed copyright holders nor the names
|
||||
* of any contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* Alternatively, this software may be distributed under the terms of the
|
||||
* GNU General Public License ("GPL") version 2 as published by the Free
|
||||
* Software Foundation.
|
||||
*
|
||||
* NO WARRANTY
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
||||
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGES.
|
||||
*/
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include "accommon.h"
|
||||
#include "acinterp.h"
|
||||
|
||||
#define _COMPONENT ACPI_EXECUTER
|
||||
ACPI_MODULE_NAME("exdebug")
|
||||
|
||||
#ifndef ACPI_NO_ERROR_MESSAGES
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_do_debug_object
|
||||
*
|
||||
* PARAMETERS: source_desc - Object to be output to "Debug Object"
|
||||
* Level - Indentation level (used for packages)
|
||||
* Index - Current package element, zero if not pkg
|
||||
*
|
||||
* RETURN: None
|
||||
*
|
||||
* DESCRIPTION: Handles stores to the AML Debug Object. For example:
|
||||
* Store(INT1, Debug)
|
||||
*
|
||||
* This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
|
||||
*
|
||||
* This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
|
||||
* if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
|
||||
* operational case, stores to the debug object are ignored but can be easily
|
||||
* enabled if necessary.
|
||||
*
|
||||
******************************************************************************/
|
||||
void
|
||||
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
|
||||
u32 level, u32 index)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
|
||||
|
||||
/* Output must be enabled via the debug_object global or the dbg_level */
|
||||
|
||||
if (!acpi_gbl_enable_aml_debug_object &&
|
||||
!(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
/*
|
||||
* Print line header as long as we are not in the middle of an
|
||||
* object display
|
||||
*/
|
||||
if (!((level > 0) && index == 0)) {
|
||||
acpi_os_printf("[ACPI Debug] %*s", level, " ");
|
||||
}
|
||||
|
||||
/* Display the index for package output only */
|
||||
|
||||
if (index > 0) {
|
||||
acpi_os_printf("(%.2u) ", index - 1);
|
||||
}
|
||||
|
||||
if (!source_desc) {
|
||||
acpi_os_printf("[Null Object]\n");
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
|
||||
acpi_os_printf("%s ",
|
||||
acpi_ut_get_object_type_name(source_desc));
|
||||
|
||||
if (!acpi_ut_valid_internal_object(source_desc)) {
|
||||
acpi_os_printf("%p, Invalid Internal Object!\n",
|
||||
source_desc);
|
||||
return_VOID;
|
||||
}
|
||||
} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
acpi_os_printf("%s: %p\n",
|
||||
acpi_ut_get_type_name(((struct
|
||||
acpi_namespace_node *)
|
||||
source_desc)->type),
|
||||
source_desc);
|
||||
return_VOID;
|
||||
} else {
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
/* source_desc is of type ACPI_DESC_TYPE_OPERAND */
|
||||
|
||||
switch (source_desc->common.type) {
|
||||
case ACPI_TYPE_INTEGER:
|
||||
|
||||
/* Output correct integer width */
|
||||
|
||||
if (acpi_gbl_integer_byte_width == 4) {
|
||||
acpi_os_printf("0x%8.8X\n",
|
||||
(u32)source_desc->integer.value);
|
||||
} else {
|
||||
acpi_os_printf("0x%8.8X%8.8X\n",
|
||||
ACPI_FORMAT_UINT64(source_desc->integer.
|
||||
value));
|
||||
}
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_BUFFER:
|
||||
|
||||
acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
|
||||
acpi_ut_dump_buffer2(source_desc->buffer.pointer,
|
||||
(source_desc->buffer.length < 256) ?
|
||||
source_desc->buffer.length : 256,
|
||||
DB_BYTE_DISPLAY);
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_STRING:
|
||||
|
||||
acpi_os_printf("[0x%.2X] \"%s\"\n",
|
||||
source_desc->string.length,
|
||||
source_desc->string.pointer);
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_PACKAGE:
|
||||
|
||||
acpi_os_printf("[Contains 0x%.2X Elements]\n",
|
||||
source_desc->package.count);
|
||||
|
||||
/* Output the entire contents of the package */
|
||||
|
||||
for (i = 0; i < source_desc->package.count; i++) {
|
||||
acpi_ex_do_debug_object(source_desc->package.
|
||||
elements[i], level + 4, i + 1);
|
||||
}
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_LOCAL_REFERENCE:
|
||||
|
||||
acpi_os_printf("[%s] ",
|
||||
acpi_ut_get_reference_name(source_desc));
|
||||
|
||||
/* Decode the reference */
|
||||
|
||||
switch (source_desc->reference.class) {
|
||||
case ACPI_REFCLASS_INDEX:
|
||||
|
||||
acpi_os_printf("0x%X\n", source_desc->reference.value);
|
||||
break;
|
||||
|
||||
case ACPI_REFCLASS_TABLE:
|
||||
|
||||
/* Case for ddb_handle */
|
||||
|
||||
acpi_os_printf("Table Index 0x%X\n",
|
||||
source_desc->reference.value);
|
||||
return;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
acpi_os_printf(" ");
|
||||
|
||||
/* Check for valid node first, then valid object */
|
||||
|
||||
if (source_desc->reference.node) {
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE
|
||||
(source_desc->reference.node) !=
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
acpi_os_printf
|
||||
(" %p - Not a valid namespace node\n",
|
||||
source_desc->reference.node);
|
||||
} else {
|
||||
acpi_os_printf("Node %p [%4.4s] ",
|
||||
source_desc->reference.node,
|
||||
(source_desc->reference.node)->
|
||||
name.ascii);
|
||||
|
||||
switch ((source_desc->reference.node)->type) {
|
||||
|
||||
/* These types have no attached object */
|
||||
|
||||
case ACPI_TYPE_DEVICE:
|
||||
acpi_os_printf("Device\n");
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_THERMAL:
|
||||
acpi_os_printf("Thermal Zone\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
acpi_ex_do_debug_object((source_desc->
|
||||
reference.
|
||||
node)->object,
|
||||
level + 4, 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (source_desc->reference.object) {
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE
|
||||
(source_desc->reference.object) ==
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
acpi_ex_do_debug_object(((struct
|
||||
acpi_namespace_node *)
|
||||
source_desc->reference.
|
||||
object)->object,
|
||||
level + 4, 0);
|
||||
} else {
|
||||
acpi_ex_do_debug_object(source_desc->reference.
|
||||
object, level + 4, 0);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
acpi_os_printf("%p\n", source_desc);
|
||||
break;
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
|
||||
return_VOID;
|
||||
}
|
||||
#endif
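The ACPI_TYPE_INTEGER case in the new exdebug.c above prints at the integer width declared by the loaded tables (32-bit when acpi_gbl_integer_byte_width is 4, i.e. a revision-1 DSDT; 64-bit otherwise) and emits 64-bit values as two 32-bit halves via ACPI_FORMAT_UINT64. A standalone sketch of that formatting, with hypothetical names and values, not ACPICA code:

/* Standalone sketch of the debug-object integer output above. */
#include <stdio.h>
#include <stdint.h>

static void print_debug_integer(uint64_t value, unsigned int integer_byte_width)
{
	if (integer_byte_width == 4) {
		/* 32-bit integers (revision-1 DSDT) */
		printf("0x%8.8X\n", (uint32_t)value);
	} else {
		/* 64-bit integers, printed as hi/lo 32-bit halves */
		printf("0x%8.8X%8.8X\n",
		       (uint32_t)(value >> 32), (uint32_t)(value & 0xFFFFFFFF));
	}
}

int main(void)
{
	print_debug_integer(0x12345678ABCDEF00ULL, 8);	/* 0x12345678ABCDEF00 */
	print_debug_integer(0x12345678ABCDEF00ULL, 4);	/* 0xABCDEF00 (truncated) */
	return 0;
}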
|
|
@ -281,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
|
|||
|
||||
if (source_desc->buffer.length < length) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"SMBus or IPMI write requires Buffer of length %X, found length %X",
|
||||
"SMBus or IPMI write requires Buffer of length %u, found length %u",
|
||||
length, source_desc->buffer.length));
|
||||
|
||||
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
|
||||
|
|
|
@ -94,7 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
|
|||
/* We must have a valid region */
|
||||
|
||||
if (rgn_desc->common.type != ACPI_TYPE_REGION) {
|
||||
ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
|
||||
ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
|
||||
rgn_desc->common.type,
|
||||
acpi_ut_get_object_type_name(rgn_desc)));
|
||||
|
||||
|
@ -175,7 +175,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
|
|||
* byte, and a field with Dword access specified.
|
||||
*/
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
|
||||
"Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
|
||||
acpi_ut_get_node_name(obj_desc->
|
||||
common_field.node),
|
||||
obj_desc->common_field.access_byte_width,
|
||||
|
@ -189,7 +189,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
|
|||
* exceeds region length, indicate an error
|
||||
*/
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
|
||||
"Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
|
||||
acpi_ut_get_node_name(obj_desc->common_field.node),
|
||||
obj_desc->common_field.base_byte_offset,
|
||||
field_datum_byte_offset,
|
||||
|
@ -281,13 +281,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
|
|||
if (ACPI_FAILURE(status)) {
|
||||
if (status == AE_NOT_IMPLEMENTED) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Region %s(%X) not implemented",
|
||||
"Region %s(0x%X) not implemented",
|
||||
acpi_ut_get_region_name(rgn_desc->region.
|
||||
space_id),
|
||||
rgn_desc->region.space_id));
|
||||
} else if (status == AE_NOT_EXIST) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Region %s(%X) has no handler",
|
||||
"Region %s(0x%X) has no handler",
|
||||
acpi_ut_get_region_name(rgn_desc->region.
|
||||
space_id),
|
||||
rgn_desc->region.space_id));
|
||||
|
@ -525,7 +525,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
|
||||
ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u",
|
||||
obj_desc->common.type));
|
||||
status = AE_AML_INTERNAL;
|
||||
break;
|
||||
|
@ -630,7 +630,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown UpdateRule value: %X",
|
||||
"Unknown UpdateRule value: 0x%X",
|
||||
(obj_desc->common_field.
|
||||
field_flags &
|
||||
AML_FIELD_UPDATE_RULE_MASK)));
|
||||
|
@ -689,7 +689,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
|
|||
if (buffer_length <
|
||||
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Field size %X (bits) is too large for buffer (%X)",
|
||||
"Field size %u (bits) is too large for buffer (%u)",
|
||||
obj_desc->common_field.bit_length, buffer_length));
|
||||
|
||||
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
|
||||
|
|
|
@ -99,7 +99,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
|
||||
obj_desc->reference.class));
|
||||
return_ACPI_STATUS(AE_AML_INTERNAL);
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
|
||||
ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
|
||||
ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
|
||||
return_ACPI_STATUS(AE_TYPE);
|
||||
}
|
||||
|
@ -276,7 +276,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
|
|||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO, "Invalid object type: %X",
|
||||
ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
|
||||
operand0->common.type));
|
||||
status = AE_AML_INTERNAL;
|
||||
}
|
||||
|
@ -378,7 +378,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
|
|||
|
||||
/* Invalid object type, should not happen here */
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Invalid object type: %X",
|
||||
ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
|
||||
operand0->common.type));
|
||||
status = AE_AML_INTERNAL;
|
||||
goto cleanup;
|
||||
|
|
|
@ -85,10 +85,10 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
|
|||
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
|
||||
|
||||
/*
|
||||
* Migrate the previous sync level associated with this mutex to the
|
||||
* previous mutex on the list so that it may be preserved. This handles
|
||||
* the case where several mutexes have been acquired at the same level,
|
||||
* but are not released in opposite order.
|
||||
* Migrate the previous sync level associated with this mutex to
|
||||
* the previous mutex on the list so that it may be preserved.
|
||||
* This handles the case where several mutexes have been acquired
|
||||
* at the same level, but are not released in opposite order.
|
||||
*/
|
||||
(obj_desc->mutex.prev)->mutex.original_sync_level =
|
||||
obj_desc->mutex.original_sync_level;
|
||||
|
@ -101,8 +101,8 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
|
|||
*
|
||||
* FUNCTION: acpi_ex_link_mutex
|
||||
*
|
||||
* PARAMETERS: obj_desc - The mutex to be linked
|
||||
* Thread - Current executing thread object
|
||||
* PARAMETERS: obj_desc - The mutex to be linked
|
||||
* Thread - Current executing thread object
|
||||
*
|
||||
* RETURN: None
|
||||
*
|
||||
|
@ -138,9 +138,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
|
|||
*
|
||||
* FUNCTION: acpi_ex_acquire_mutex_object
|
||||
*
|
||||
* PARAMETERS: time_desc - Timeout in milliseconds
|
||||
* PARAMETERS: Timeout - Timeout in milliseconds
|
||||
* obj_desc - Mutex object
|
||||
* Thread - Current thread state
|
||||
* thread_id - Current thread state
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
|
@ -234,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
|
|||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
/* Must have a valid thread ID */
|
||||
/* Must have a valid thread state struct */
|
||||
|
||||
if (!walk_state->thread) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
|
@ -249,7 +249,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
|
|||
*/
|
||||
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
|
||||
"Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
|
||||
acpi_ut_get_node_name(obj_desc->mutex.node),
|
||||
walk_state->thread->current_sync_level));
|
||||
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
|
||||
|
@ -359,6 +359,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
{
|
||||
acpi_status status = AE_OK;
|
||||
u8 previous_sync_level;
|
||||
struct acpi_thread_state *owner_thread;
|
||||
|
||||
ACPI_FUNCTION_TRACE(ex_release_mutex);
|
||||
|
||||
|
@ -366,9 +367,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
owner_thread = obj_desc->mutex.owner_thread;
|
||||
|
||||
/* The mutex must have been previously acquired in order to release it */
|
||||
|
||||
if (!obj_desc->mutex.owner_thread) {
|
||||
if (!owner_thread) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Cannot release Mutex [%4.4s], not acquired",
|
||||
acpi_ut_get_node_name(obj_desc->mutex.node)));
|
||||
|
@ -387,16 +390,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
* The Mutex is owned, but this thread must be the owner.
|
||||
* Special case for Global Lock, any thread can release
|
||||
*/
|
||||
if ((obj_desc->mutex.owner_thread->thread_id !=
|
||||
walk_state->thread->thread_id)
|
||||
&& (obj_desc != acpi_gbl_global_lock_mutex)) {
|
||||
if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
|
||||
(obj_desc != acpi_gbl_global_lock_mutex)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
|
||||
ACPI_CAST_PTR(void, walk_state->thread->thread_id),
|
||||
acpi_ut_get_node_name(obj_desc->mutex.node),
|
||||
ACPI_CAST_PTR(void,
|
||||
obj_desc->mutex.owner_thread->
|
||||
thread_id)));
|
||||
ACPI_CAST_PTR(void, owner_thread->thread_id)));
|
||||
return_ACPI_STATUS(AE_AML_NOT_OWNER);
|
||||
}
|
||||
|
||||
|
@ -407,10 +407,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
* different level can only mean that the mutex ordering rule is being
|
||||
* violated. This behavior is clarified in ACPI 4.0 specification.
|
||||
*/
|
||||
if (obj_desc->mutex.sync_level !=
|
||||
walk_state->thread->current_sync_level) {
|
||||
if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
|
||||
"Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
|
||||
acpi_ut_get_node_name(obj_desc->mutex.node),
|
||||
obj_desc->mutex.sync_level,
|
||||
walk_state->thread->current_sync_level));
|
||||
|
@ -423,7 +422,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
* acquired, but are not released in reverse order.
|
||||
*/
|
||||
previous_sync_level =
|
||||
walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
|
||||
owner_thread->acquired_mutex_list->mutex.original_sync_level;
|
||||
|
||||
status = acpi_ex_release_mutex_object(obj_desc);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
@ -434,8 +433,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
|
||||
/* Restore the previous sync_level */
|
||||
|
||||
walk_state->thread->current_sync_level = previous_sync_level;
|
||||
owner_thread->current_sync_level = previous_sync_level;
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
|
|||
*
|
||||
* FUNCTION: acpi_ex_release_all_mutexes
|
||||
*
|
||||
* PARAMETERS: Thread - Current executing thread object
|
||||
* PARAMETERS: Thread - Current executing thread object
|
||||
*
|
||||
* RETURN: Status
|
||||
*
|
||||
|
|
|
@ -102,7 +102,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
|
|||
name_string = ACPI_ALLOCATE(size_needed);
|
||||
if (!name_string) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not allocate size %d", size_needed));
|
||||
"Could not allocate size %u", size_needed));
|
||||
return_PTR(NULL);
|
||||
}
|
||||
|
||||
|
@ -216,7 +216,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
|
|||
*/
|
||||
status = AE_AML_BAD_NAME;
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Bad character %02x in name, at %p",
|
||||
"Bad character 0x%02x in name, at %p",
|
||||
*aml_address, aml_address));
|
||||
}
|
||||
|
||||
|
|
|
@ -110,7 +110,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default: /* Unknown opcode */
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
break;
|
||||
|
@ -173,7 +173,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
|
|||
|
||||
case AML_SLEEP_OP: /* Sleep (msec_time) */
|
||||
|
||||
status = acpi_ex_system_do_suspend(operand[0]->integer.value);
|
||||
status = acpi_ex_system_do_sleep(operand[0]->integer.value);
|
||||
break;
|
||||
|
||||
case AML_STALL_OP: /* Stall (usec_time) */
|
||||
|
@ -189,7 +189,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default: /* Unknown opcode */
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
break;
|
||||
|
@ -229,7 +229,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default: /* Unknown opcode */
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
@ -399,7 +399,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
if (digit > 0) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Integer too large to convert to BCD: %8.8X%8.8X",
|
||||
"Integer too large to convert to BCD: 0x%8.8X%8.8X",
|
||||
ACPI_FORMAT_UINT64(operand[0]->
|
||||
integer.value)));
|
||||
status = AE_AML_NUMERIC_OVERFLOW;
|
||||
|
@ -540,7 +540,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default: /* Unknown opcode */
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
@ -979,7 +979,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown Index TargetType %X in reference object %p",
|
||||
"Unknown Index TargetType 0x%X in reference object %p",
|
||||
operand[0]->reference.
|
||||
target_type, operand[0]));
|
||||
status = AE_AML_OPERAND_TYPE;
|
||||
|
@ -1007,7 +1007,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown class in reference(%p) - %2.2X",
|
||||
"Unknown class in reference(%p) - 0x%2.2X",
|
||||
operand[0],
|
||||
operand[0]->reference.class));
|
||||
|
||||
|
@ -1019,7 +1019,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
|
|
@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
|
|||
status = AE_AML_OPERAND_TYPE;
|
||||
break;
|
||||
}
|
||||
#ifdef ACPI_GPE_NOTIFY_CHECK
|
||||
/*
|
||||
* GPE method wake/notify check. Here, we want to ensure that we
|
||||
* don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
|
||||
* GPE method during system runtime. If we do, the GPE is marked
|
||||
* as "wake-only" and disabled.
|
||||
*
|
||||
* 1) Is the Notify() value == device_wake?
|
||||
* 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
|
||||
* 3) Did the original GPE happen at system runtime?
|
||||
* (versus during wake)
|
||||
*
|
||||
* If all three cases are true, this is a wake-only GPE that should
|
||||
* be disabled at runtime.
|
||||
*/
|
||||
if (value == 2) { /* device_wake */
|
||||
status =
|
||||
acpi_ev_check_for_wake_only_gpe(walk_state->
|
||||
gpe_event_info);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
|
||||
/* AE_WAKE_ONLY_GPE only error, means ignore this notify */
|
||||
|
||||
return_ACPI_STATUS(AE_OK)
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Dispatch the notify to the appropriate handler
|
||||
|
@ -159,7 +132,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
}
|
||||
|
@ -224,7 +197,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
@ -441,7 +414,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Index (%X%8.8X) is beyond end of object",
|
||||
"Index (0x%8.8X%8.8X) is beyond end of object",
|
||||
ACPI_FORMAT_UINT64(index)));
|
||||
goto cleanup;
|
||||
}
|
||||
|
@ -464,7 +437,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
break;
|
||||
|
@ -572,7 +545,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
|
|
@ -119,7 +119,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
@ -244,7 +244,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
|
|
@ -245,7 +245,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
|
|||
index = operand[5]->integer.value;
|
||||
if (index >= operand[0]->package.count) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Index (%X%8.8X) beyond package end (%X)",
|
||||
"Index (0x%8.8X%8.8X) beyond package end (0x%X)",
|
||||
ACPI_FORMAT_UINT64(index),
|
||||
operand[0]->package.count));
|
||||
status = AE_AML_PACKAGE_LIMIT;
|
||||
|
@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
|
||||
walk_state->opcode));
|
||||
status = AE_AML_BAD_OPCODE;
|
||||
goto cleanup;
|
||||
|
|
|
@ -275,7 +275,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
|
|||
default:
|
||||
/* Invalid field access type */
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
|
||||
ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
|
||||
return_UINT32(0);
|
||||
}
|
||||
|
||||
|
@ -430,7 +430,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
|
|||
type = acpi_ns_get_type(info->region_node);
|
||||
if (type != ACPI_TYPE_REGION) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Needed Region, found type %X (%s)",
|
||||
"Needed Region, found type 0x%X (%s)",
|
||||
type, acpi_ut_get_type_name(type)));
|
||||
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
|
|
|
@ -105,7 +105,7 @@ acpi_ex_system_memory_space_handler(u32 function,
|
|||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
|
||||
ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
|
||||
bit_width));
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
|
||||
}
|
||||
|
@ -173,7 +173,7 @@ acpi_ex_system_memory_space_handler(u32 function,
|
|||
mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
|
||||
if (!mem_info->mapped_logical_address) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not map memory at %8.8X%8.8X, size %X",
|
||||
"Could not map memory at 0x%8.8X%8.8X, size %u",
|
||||
ACPI_FORMAT_NATIVE_UINT(address),
|
||||
(u32) map_length));
|
||||
mem_info->mapped_length = 0;
|
||||
|
@ -491,8 +491,10 @@ acpi_ex_data_table_space_handler(u32 function,
|
|||
{
|
||||
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
|
||||
|
||||
/* Perform the memory read or write */
|
||||
|
||||
/*
|
||||
* Perform the memory read or write. The bit_width was already
|
||||
* validated.
|
||||
*/
|
||||
switch (function) {
|
||||
case ACPI_READ:
|
||||
|
||||
|
@ -502,9 +504,14 @@ acpi_ex_data_table_space_handler(u32 function,
|
|||
break;
|
||||
|
||||
case ACPI_WRITE:
|
||||
|
||||
ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
|
||||
ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
return_ACPI_STATUS(AE_SUPPORT);
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
|
|
|
@ -252,7 +252,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
|
|||
/* No named references are allowed here */
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unsupported Reference type %X",
|
||||
"Unsupported Reference type 0x%X",
|
||||
source_desc->reference.class));
|
||||
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
|
@ -264,7 +264,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
|
|||
/* Default case is for unknown types */
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Node %p - Unknown object type %X",
|
||||
"Node %p - Unknown object type 0x%X",
|
||||
node, entry_type));
|
||||
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
|
|
|
@ -231,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
|
|||
/* Invalid reference object */
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown TargetType %X in Index/Reference object %p",
|
||||
"Unknown TargetType 0x%X in Index/Reference object %p",
|
||||
stack_desc->reference.target_type,
|
||||
stack_desc));
|
||||
status = AE_AML_INTERNAL;
|
||||
|
@ -273,8 +273,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown Reference type %X in %p", ref_type,
|
||||
stack_desc));
|
||||
"Unknown Reference type 0x%X in %p",
|
||||
ref_type, stack_desc));
|
||||
status = AE_AML_INTERNAL;
|
||||
break;
|
||||
}
|
||||
|
@ -403,7 +403,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
|
|||
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Not a namespace node %p [%s]",
|
||||
node,
|
||||
acpi_ut_get_descriptor_name(node)));
|
||||
return_ACPI_STATUS(AE_AML_INTERNAL);
|
||||
|
@ -507,7 +508,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown Reference Class %2.2X",
|
||||
"Unknown Reference Class 0x%2.2X",
|
||||
obj_desc->reference.class));
|
||||
return_ACPI_STATUS(AE_AML_INTERNAL);
|
||||
}
|
||||
|
|
|
@ -153,7 +153,7 @@ acpi_ex_resolve_operands(u16 opcode,
|
|||
|
||||
arg_types = op_info->runtime_args;
|
||||
if (arg_types == ARGI_INVALID_OPCODE) {
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
|
||||
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
|
||||
|
||||
return_ACPI_STATUS(AE_AML_INTERNAL);
|
||||
}
|
||||
|
@ -218,7 +218,7 @@ acpi_ex_resolve_operands(u16 opcode,
|
|||
|
||||
if (!acpi_ut_valid_object_type(object_type)) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Bad operand object type [%X]",
|
||||
"Bad operand object type [0x%X]",
|
||||
object_type));
|
||||
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
|
||||
|
@ -253,7 +253,7 @@ acpi_ex_resolve_operands(u16 opcode,
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unknown Reference Class %2.2X in %p",
|
||||
"Unknown Reference Class 0x%2.2X in %p",
|
||||
obj_desc->reference.class,
|
||||
obj_desc));
|
||||
|
||||
|
@ -665,7 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
|
|||
/* Unknown type */
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Internal - Unknown ARGI (required operand) type %X",
|
||||
"Internal - Unknown ARGI (required operand) type 0x%X",
|
||||
this_arg_type));
|
||||
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
/******************************************************************************
|
||||
*
|
||||
* Module Name: exstore - AML Interpreter object store support
|
||||
|
@ -53,224 +52,11 @@
|
|||
ACPI_MODULE_NAME("exstore")
|
||||
|
||||
/* Local prototypes */
|
||||
static void
|
||||
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
|
||||
u32 level, u32 index);
|
||||
|
||||
static acpi_status
|
||||
acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
|
||||
union acpi_operand_object *dest_desc,
|
||||
struct acpi_walk_state *walk_state);
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_do_debug_object
|
||||
*
|
||||
* PARAMETERS: source_desc - Value to be stored
|
||||
* Level - Indentation level (used for packages)
|
||||
* Index - Current package element, zero if not pkg
|
||||
*
|
||||
* RETURN: None
|
||||
*
|
||||
* DESCRIPTION: Handles stores to the Debug Object.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
static void
|
||||
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
|
||||
u32 level, u32 index)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
|
||||
|
||||
/* Print line header as long as we are not in the middle of an object display */
|
||||
|
||||
if (!((level > 0) && index == 0)) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
|
||||
level, " "));
|
||||
}
|
||||
|
||||
/* Display index for package output only */
|
||||
|
||||
if (index > 0) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
"(%.2u) ", index - 1));
|
||||
}
|
||||
|
||||
if (!source_desc) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
|
||||
acpi_ut_get_object_type_name
|
||||
(source_desc)));
|
||||
|
||||
if (!acpi_ut_valid_internal_object(source_desc)) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
"%p, Invalid Internal Object!\n",
|
||||
source_desc));
|
||||
return_VOID;
|
||||
}
|
||||
} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
|
||||
acpi_ut_get_type_name(((struct
|
||||
acpi_namespace_node
|
||||
*)source_desc)->
|
||||
type),
|
||||
source_desc));
|
||||
return_VOID;
|
||||
} else {
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
/* source_desc is of type ACPI_DESC_TYPE_OPERAND */
|
||||
|
||||
switch (source_desc->common.type) {
|
||||
case ACPI_TYPE_INTEGER:
|
||||
|
||||
/* Output correct integer width */
|
||||
|
||||
if (acpi_gbl_integer_byte_width == 4) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
|
||||
(u32) source_desc->integer.
|
||||
value));
|
||||
} else {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
"0x%8.8X%8.8X\n",
|
||||
ACPI_FORMAT_UINT64(source_desc->
|
||||
integer.
|
||||
value)));
|
||||
}
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_BUFFER:
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
|
||||
(u32) source_desc->buffer.length));
|
||||
ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
|
||||
(source_desc->buffer.length <
|
||||
256) ? source_desc->buffer.length : 256);
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_STRING:
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
|
||||
source_desc->string.length,
|
||||
source_desc->string.pointer));
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_PACKAGE:
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
"[Contains 0x%.2X Elements]\n",
|
||||
source_desc->package.count));
|
||||
|
||||
/* Output the entire contents of the package */
|
||||
|
||||
for (i = 0; i < source_desc->package.count; i++) {
|
||||
acpi_ex_do_debug_object(source_desc->package.
|
||||
elements[i], level + 4, i + 1);
|
||||
}
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_LOCAL_REFERENCE:
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
|
||||
acpi_ut_get_reference_name(source_desc)));
|
||||
|
||||
/* Decode the reference */
|
||||
|
||||
switch (source_desc->reference.class) {
|
||||
case ACPI_REFCLASS_INDEX:
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
|
||||
source_desc->reference.value));
|
||||
break;
|
||||
|
||||
case ACPI_REFCLASS_TABLE:
|
||||
|
||||
/* Case for ddb_handle */
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
"Table Index 0x%X\n",
|
||||
source_desc->reference.value));
|
||||
return;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
|
||||
|
||||
/* Check for valid node first, then valid object */
|
||||
|
||||
if (source_desc->reference.node) {
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE
|
||||
(source_desc->reference.node) !=
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
" %p - Not a valid namespace node\n",
|
||||
source_desc->reference.
|
||||
node));
|
||||
} else {
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
|
||||
"Node %p [%4.4s] ",
|
||||
source_desc->reference.
|
||||
node,
|
||||
(source_desc->reference.
|
||||
node)->name.ascii));
|
||||
|
||||
switch ((source_desc->reference.node)->type) {
|
||||
|
||||
/* These types have no attached object */
|
||||
|
||||
case ACPI_TYPE_DEVICE:
|
||||
acpi_os_printf("Device\n");
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_THERMAL:
|
||||
acpi_os_printf("Thermal Zone\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
acpi_ex_do_debug_object((source_desc->
|
||||
reference.
|
||||
node)->object,
|
||||
level + 4, 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (source_desc->reference.object) {
|
||||
if (ACPI_GET_DESCRIPTOR_TYPE
|
||||
(source_desc->reference.object) ==
|
||||
ACPI_DESC_TYPE_NAMED) {
|
||||
acpi_ex_do_debug_object(((struct
|
||||
acpi_namespace_node *)
|
||||
source_desc->reference.
|
||||
object)->object,
|
||||
level + 4, 0);
|
||||
} else {
|
||||
acpi_ex_do_debug_object(source_desc->reference.
|
||||
object, level + 4, 0);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
|
||||
source_desc));
|
||||
break;
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_store
|
||||
|
@ -402,12 +188,12 @@ acpi_ex_store(union acpi_operand_object *source_desc,
|
|||
source_desc,
|
||||
acpi_ut_get_object_type_name(source_desc)));
|
||||
|
||||
acpi_ex_do_debug_object(source_desc, 0, 0);
|
||||
ACPI_DEBUG_OBJECT(source_desc, 0, 0);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
|
||||
ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
|
||||
ref_desc->reference.class));
|
||||
ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
|
||||
|
||||
|
|
|
@ -170,7 +170,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
|
|||
* (ACPI specifies 100 usec as max, but this gives some slack in
|
||||
* order to support existing BIOSs)
|
||||
*/
|
||||
ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
|
||||
ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)",
|
||||
how_long));
|
||||
status = AE_AML_OPERAND_VALUE;
|
||||
} else {
|
||||
|
@ -182,18 +182,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
|
|||
|
||||
/*******************************************************************************
|
||||
*
|
||||
* FUNCTION: acpi_ex_system_do_suspend
|
||||
* FUNCTION: acpi_ex_system_do_sleep
|
||||
*
|
||||
* PARAMETERS: how_long - The amount of time to suspend,
|
||||
* PARAMETERS: how_long - The amount of time to sleep,
|
||||
* in milliseconds
|
||||
*
|
||||
* RETURN: None
|
||||
*
|
||||
* DESCRIPTION: Suspend running thread for specified amount of time.
|
||||
* DESCRIPTION: Sleep the running thread for specified amount of time.
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_ex_system_do_suspend(u64 how_long)
|
||||
acpi_status acpi_ex_system_do_sleep(u64 how_long)
|
||||
{
|
||||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
|
|
|
@ -299,7 +299,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
|
|||
ACPI_FUNCTION_ENTRY();
|
||||
|
||||
if (register_id > ACPI_BITREG_MAX) {
|
||||
ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
|
||||
ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: 0x%X",
|
||||
register_id));
|
||||
return (NULL);
|
||||
}
|
||||
|
@ -413,7 +413,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
|
|||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
|
||||
ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
|
||||
status = AE_BAD_PARAMETER;
|
||||
break;
|
||||
}
|
||||
|
@ -549,7 +549,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
|
|||
break;
|
||||
|
||||
default:
|
||||
ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
|
||||
ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
|
||||
status = AE_BAD_PARAMETER;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -245,7 +245,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
|||
|
||||
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
|
||||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
|
||||
ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
|
||||
ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
|
||||
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
|
||||
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
|
||||
}
|
||||
|
|
|
@ -150,7 +150,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
|
|||
|
||||
if (last_address > ACPI_UINT16_MAX) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Illegal I/O port address/length above 64K: 0x%p/%X",
|
||||
"Illegal I/O port address/length above 64K: %p/0x%X",
|
||||
ACPI_CAST_PTR(void, address), byte_width));
|
||||
return_ACPI_STATUS(AE_LIMIT);
|
||||
}
|
||||
|
|
|
@ -222,7 +222,7 @@ acpi_status acpi_ns_root_initialize(void)
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Unsupported initial type value %X",
|
||||
"Unsupported initial type value 0x%X",
|
||||
init_val->type));
|
||||
acpi_ut_remove_reference(obj_desc);
|
||||
obj_desc = NULL;
|
||||
|
|
|
@ -205,8 +205,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
|
|||
/* Check the node type and name */
|
||||
|
||||
if (type > ACPI_TYPE_LOCAL_MAX) {
|
||||
ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
|
||||
type));
|
||||
ACPI_WARNING((AE_INFO,
|
||||
"Invalid ACPI Object Type 0x%08X", type));
|
||||
}
|
||||
|
||||
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
|
||||
|
|
|
@ -107,7 +107,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
|
|||
|
||||
if (index != 0) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not construct external pathname; index=%X, size=%X, Path=%s",
|
||||
"Could not construct external pathname; index=%u, size=%u, Path=%s",
|
||||
(u32) index, (u32) size, &name_buffer[size]));
|
||||
|
||||
return (AE_BAD_PARAMETER);
|
||||
|
|
|
@ -311,7 +311,7 @@ acpi_ns_search_and_enter(u32 target_name,
|
|||
|
||||
if (!node || !target_name || !return_node) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Null parameter: Node %p Name %X ReturnNode %p",
|
||||
"Null parameter: Node %p Name 0x%X ReturnNode %p",
|
||||
node, target_name, return_node));
|
||||
return_ACPI_STATUS(AE_BAD_PARAMETER);
|
||||
}
|
||||
|
|
|
@ -276,7 +276,7 @@ u32 acpi_ns_local(acpi_object_type type)
|
|||
|
||||
/* Type code out of range */
|
||||
|
||||
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
|
||||
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
|
||||
return_UINT32(ACPI_NS_NORMAL);
|
||||
}
|
||||
|
||||
|
@ -764,7 +764,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
|
|||
|
||||
/* type code out of range */
|
||||
|
||||
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
|
||||
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
|
||||
return_UINT32(ACPI_NS_NORMAL);
|
||||
}
|
||||
|
||||
|
|
|
@ -460,7 +460,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
|
||||
ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type));
|
||||
return_VOID;
|
||||
}
|
||||
|
||||
|
@ -742,7 +742,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
|
|||
|
||||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
|
||||
ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
|
||||
status = AE_AML_OPERAND_TYPE;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -136,7 +136,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
|
|||
/* The opcode is unrecognized. Just skip unknown opcodes */
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Found unknown opcode %X at AML address %p offset %X, ignoring",
|
||||
"Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
|
||||
walk_state->opcode, walk_state->parser_state.aml,
|
||||
walk_state->aml_offset));
|
||||
|
||||
|
@ -1021,7 +1021,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
|
|||
if (status == AE_AML_NO_RETURN_VALUE) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Invoked method did not return a value"));
|
||||
|
||||
}
|
||||
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
|
|
|
@ -46,6 +46,7 @@
|
|||
#include "acparser.h"
|
||||
#include "acdispat.h"
|
||||
#include "acinterp.h"
|
||||
#include "actables.h"
|
||||
#include "amlcode.h"
|
||||
|
||||
#define _COMPONENT ACPI_PARSER
|
||||
|
@ -220,6 +221,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
|
|||
|
||||
ACPI_FUNCTION_TRACE(ps_execute_method);
|
||||
|
||||
/* Quick validation of DSDT header */
|
||||
|
||||
acpi_tb_check_dsdt_header();
|
||||
|
||||
/* Validate the Info and method Node */
|
||||
|
||||
if (!info || !info->resolved_node) {
|
||||
|
|
|
@ -212,7 +212,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
|
||||
if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X]) Need sub-package, found %s",
|
||||
"(PRT[%u]) Need sub-package, found %s",
|
||||
index,
|
||||
acpi_ut_get_object_type_name
|
||||
(*top_object_list)));
|
||||
|
@ -223,7 +223,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
|
||||
if ((*top_object_list)->package.count != 4) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X]) Need package of length 4, found length %d",
|
||||
"(PRT[%u]) Need package of length 4, found length %u",
|
||||
index, (*top_object_list)->package.count));
|
||||
return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
|
||||
}
|
||||
|
@ -240,7 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
obj_desc = sub_object_list[0];
|
||||
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X].Address) Need Integer, found %s",
|
||||
"(PRT[%u].Address) Need Integer, found %s",
|
||||
index,
|
||||
acpi_ut_get_object_type_name(obj_desc)));
|
||||
return_ACPI_STATUS(AE_BAD_DATA);
|
||||
|
@ -253,7 +253,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
obj_desc = sub_object_list[1];
|
||||
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X].Pin) Need Integer, found %s",
|
||||
"(PRT[%u].Pin) Need Integer, found %s",
|
||||
index,
|
||||
acpi_ut_get_object_type_name(obj_desc)));
|
||||
return_ACPI_STATUS(AE_BAD_DATA);
|
||||
|
@ -289,7 +289,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
if (obj_desc->reference.class !=
|
||||
ACPI_REFCLASS_NAME) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X].Source) Need name, found Reference Class %X",
|
||||
"(PRT[%u].Source) Need name, found Reference Class 0x%X",
|
||||
index,
|
||||
obj_desc->reference.class));
|
||||
return_ACPI_STATUS(AE_BAD_DATA);
|
||||
|
@ -340,7 +340,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
default:
|
||||
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X].Source) Need Ref/String/Integer, found %s",
|
||||
"(PRT[%u].Source) Need Ref/String/Integer, found %s",
|
||||
index,
|
||||
acpi_ut_get_object_type_name
|
||||
(obj_desc)));
|
||||
|
@ -358,7 +358,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
|
|||
obj_desc = sub_object_list[3];
|
||||
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"(PRT[%X].SourceIndex) Need Integer, found %s",
|
||||
"(PRT[%u].SourceIndex) Need Integer, found %s",
|
||||
index,
|
||||
acpi_ut_get_object_type_name(obj_desc)));
|
||||
return_ACPI_STATUS(AE_BAD_DATA);
|
||||
|
|
|
@ -94,7 +94,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
|
|||
[resource_index]);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not convert AML resource (Type %X)",
|
||||
"Could not convert AML resource (Type 0x%X)",
|
||||
*aml));
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
|
|||
|
||||
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Invalid descriptor type (%X) in resource list",
|
||||
"Invalid descriptor type (0x%X) in resource list",
|
||||
resource->type));
|
||||
return_ACPI_STATUS(AE_BAD_DATA);
|
||||
}
|
||||
|
@ -161,7 +161,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
|
|||
[resource->type]);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_EXCEPTION((AE_INFO, status,
|
||||
"Could not convert resource (type %X) to AML",
|
||||
"Could not convert resource (type 0x%X) to AML",
|
||||
resource->type));
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
|
|
@ -88,7 +88,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
|
|||
/* Each internal resource struct is expected to be 32-bit aligned */
|
||||
|
||||
ACPI_WARNING((AE_INFO,
|
||||
"Misaligned resource pointer (get): %p Type %2.2X Len %X",
|
||||
"Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
|
||||
resource, resource->type, resource->length));
|
||||
}
|
||||
|
||||
|
@ -541,7 +541,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
|
|||
* "IRQ Format"), so 0x00 and 0x09 are illegal.
|
||||
*/
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Invalid interrupt polarity/trigger in resource list, %X",
|
||||
"Invalid interrupt polarity/trigger in resource list, 0x%X",
|
||||
aml->irq.flags));
|
||||
return_ACPI_STATUS(AE_BAD_DATA);
|
||||
}
|
||||
|
|
|
@ -283,7 +283,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
"FADT (revision %u) is longer than ACPI 2.0 version, "
"truncating length 0x%X to 0x%X",
"truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
}

@ -422,7 +422,7 @@ static void acpi_tb_convert_fadt(void)
if (address64->address && address32 &&
(address64->address != (u64) address32)) {
ACPI_ERROR((AE_INFO,
"32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 32",
"32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
fadt_info_table[i].name, address32,
ACPI_FORMAT_UINT64(address64->address)));
}

@ -481,7 +481,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
ACPI_WARNING((AE_INFO,
"32/64X FACS address mismatch in FADT - "
"%8.8X/%8.8X%8.8X, using 32",
"0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.facs,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));

@ -492,7 +492,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
ACPI_WARNING((AE_INFO,
"32/64X DSDT address mismatch in FADT - "
"%8.8X/%8.8X%8.8X, using 32",
"0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.dsdt,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));

@ -521,7 +521,7 @@ static void acpi_tb_validate_fadt(void)
if (address64->address &&
(address64->bit_width != ACPI_MUL_8(length))) {
ACPI_WARNING((AE_INFO,
"32/64X length mismatch in %s: %d/%d",
"32/64X length mismatch in %s: %u/%u",
name, ACPI_MUL_8(length),
address64->bit_width));
}

@ -534,7 +534,7 @@ static void acpi_tb_validate_fadt(void)
if (!address64->address || !length) {
ACPI_ERROR((AE_INFO,
"Required field %s has zero address and/or length:"
" %8.8X%8.8X/%X",
" 0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),

@ -550,7 +550,7 @@ static void acpi_tb_validate_fadt(void)
(!address64->address && length)) {
ACPI_WARNING((AE_INFO,
"Optional field %s has zero address or length: "
"%8.8X%8.8X/%X",
"0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),

@ -600,7 +600,7 @@ static void acpi_tb_setup_fadt_registers(void)
(fadt_info_table[i].default_length !=
target64->bit_width)) {
ACPI_WARNING((AE_INFO,
"Invalid length for %s: %d, using default %d",
"Invalid length for %s: %u, using default %u",
fadt_info_table[i].name,
target64->bit_width,
fadt_info_table[i].
@ -83,7 +83,7 @@ acpi_tb_find_table(char *signature,
/* Search for the table */
for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
header.signature, ACPI_NAME_SIZE)) {
@ -137,7 +137,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/* Check if table is already registered */
for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (!acpi_gbl_root_table_list.tables[i].pointer) {
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.

@ -273,7 +273,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
size +
max_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {

@ -286,8 +286,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
(acpi_size) acpi_gbl_root_table_list.size *
sizeof(struct acpi_table_desc));
(acpi_size) acpi_gbl_root_table_list.
max_table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);

@ -295,8 +295,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
acpi_gbl_root_table_list.max_table_count +=
ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}

@ -321,38 +322,36 @@ acpi_tb_store_table(acpi_physical_address address,
struct acpi_table_header *table,
u32 length, u8 flags, u32 *table_index)
{
acpi_status status = AE_OK;
acpi_status status;
struct acpi_table_desc *new_table;
/* Ensure that there is room for the table in the Root Table List */
if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
if (acpi_gbl_root_table_list.current_table_count >=
acpi_gbl_root_table_list.max_table_count) {
status = acpi_tb_resize_root_table_list();
if (ACPI_FAILURE(status)) {
return (status);
}
}
new_table =
&acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
current_table_count];
/* Initialize added table */
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
address = address;
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
pointer = table;
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
length;
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
owner_id = 0;
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
flags;
new_table->address = address;
new_table->pointer = table;
new_table->length = length;
new_table->owner_id = 0;
new_table->flags = flags;
ACPI_MOVE_32_TO_32(&
(acpi_gbl_root_table_list.
tables[acpi_gbl_root_table_list.count].signature),
table->signature);
ACPI_MOVE_32_TO_32(&new_table->signature, table->signature);
*table_index = acpi_gbl_root_table_list.count;
acpi_gbl_root_table_list.count++;
return (status);
*table_index = acpi_gbl_root_table_list.current_table_count;
acpi_gbl_root_table_list.current_table_count++;
return (AE_OK);
}
/*******************************************************************************

@ -408,7 +407,7 @@ void acpi_tb_terminate(void)
/* Delete the individual tables */
for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
}

@ -422,7 +421,7 @@ void acpi_tb_terminate(void)
acpi_gbl_root_table_list.tables = NULL;
acpi_gbl_root_table_list.flags = 0;
acpi_gbl_root_table_list.count = 0;
acpi_gbl_root_table_list.current_table_count = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);

@ -452,7 +451,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
return_ACPI_STATUS(status);
}
if (table_index >= acpi_gbl_root_table_list.count) {
if (table_index >= acpi_gbl_root_table_list.current_table_count) {
/* The table index does not exist */

@ -505,7 +504,7 @@ acpi_status acpi_tb_allocate_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (table_index < acpi_gbl_root_table_list.count) {
if (table_index < acpi_gbl_root_table_list.current_table_count) {
status = acpi_ut_allocate_owner_id
(&(acpi_gbl_root_table_list.tables[table_index].owner_id));
}

@ -533,7 +532,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_release_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (table_index < acpi_gbl_root_table_list.count) {
if (table_index < acpi_gbl_root_table_list.current_table_count) {
acpi_ut_release_owner_id(&
(acpi_gbl_root_table_list.
tables[table_index].owner_id));

@ -564,7 +563,7 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
ACPI_FUNCTION_TRACE(tb_get_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (table_index < acpi_gbl_root_table_list.count) {
if (table_index < acpi_gbl_root_table_list.current_table_count) {
*owner_id =
acpi_gbl_root_table_list.tables[table_index].owner_id;
status = AE_OK;

@ -589,7 +588,7 @@ u8 acpi_tb_is_table_loaded(u32 table_index)
u8 is_loaded = FALSE;
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (table_index < acpi_gbl_root_table_list.count) {
if (table_index < acpi_gbl_root_table_list.current_table_count) {
is_loaded = (u8)
(acpi_gbl_root_table_list.tables[table_index].flags &
ACPI_TABLE_IS_LOADED);

@ -616,7 +615,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
{
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (table_index < acpi_gbl_root_table_list.count) {
if (table_index < acpi_gbl_root_table_list.current_table_count) {
if (is_loaded) {
acpi_gbl_root_table_list.tables[table_index].flags |=
ACPI_TABLE_IS_LOADED;
@ -158,7 +158,7 @@ acpi_status acpi_tb_initialize_facs(void)
u8 acpi_tb_tables_loaded(void)
{
if (acpi_gbl_root_table_list.count >= 3) {
if (acpi_gbl_root_table_list.current_table_count >= 3) {
return (TRUE);
}

@ -309,7 +309,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
if (checksum) {
ACPI_WARNING((AE_INFO,
"Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
"Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
table->signature, table->checksum,
(u8) (table->checksum - checksum)));

@ -347,6 +347,84 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
return sum;
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_check_dsdt_header
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
* if the DSDT has been replaced from outside the OS and/or if
* the DSDT header has been corrupted.
*
******************************************************************************/
void acpi_tb_check_dsdt_header(void)
{
/* Compare original length and checksum to current values */
if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
ACPI_ERROR((AE_INFO,
"The DSDT has been corrupted or replaced - old, new headers below"));
acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
acpi_tb_print_table_header(0, acpi_gbl_DSDT);
ACPI_ERROR((AE_INFO,
"Please send DMI info to linux-acpi@vger.kernel.org\n"
"If system does not work as expected, please boot with acpi=copy_dsdt"));
/* Disable further error messages */
acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
acpi_gbl_original_dsdt_header.checksum =
acpi_gbl_DSDT->checksum;
}
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_copy_dsdt
*
* PARAMETERS: table_desc - Installed table to copy
*
* RETURN: None
*
* DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
* Some very bad BIOSs are known to either corrupt the DSDT or
* install a new, bad DSDT. This copy works around the problem.
*
******************************************************************************/
struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
{
struct acpi_table_header *new_table;
struct acpi_table_desc *table_desc;
table_desc = &acpi_gbl_root_table_list.tables[table_index];
new_table = ACPI_ALLOCATE(table_desc->length);
if (!new_table) {
ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
table_desc->length));
return (NULL);
}
ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
acpi_tb_delete_table(table_desc);
table_desc->pointer = new_table;
table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;
ACPI_INFO((AE_INFO,
"Forced DSDT copy: length 0x%05X copied locally, original unmapped",
new_table->length));
return (new_table);
}
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_table

@ -496,7 +574,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/* Will truncate 64-bit address to 32 bits, issue warning */
ACPI_WARNING((AE_INFO,
"64-bit Physical Address in XSDT is too large (%8.8X%8.8X),"
"64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
" truncating",
ACPI_FORMAT_UINT64(address64)));
}

@ -629,14 +707,14 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
table_entry =
ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
acpi_gbl_root_table_list.count = 2;
acpi_gbl_root_table_list.current_table_count = 2;
/*
* Initialize the root table array from the RSDT/XSDT
*/
for (i = 0; i < table_count; i++) {
if (acpi_gbl_root_table_list.count >=
acpi_gbl_root_table_list.size) {
if (acpi_gbl_root_table_list.current_table_count >=
acpi_gbl_root_table_list.max_table_count) {
/* There is no more room in the root table array, attempt resize */

@ -646,19 +724,20 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
"Truncating %u table entries!",
(unsigned) (table_count -
(acpi_gbl_root_table_list.
count - 2))));
current_table_count -
2))));
break;
}
}
/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
address =
acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
current_table_count].address =
acpi_tb_get_root_table_entry(table_entry, table_entry_size);
table_entry += table_entry_size;
acpi_gbl_root_table_list.count++;
acpi_gbl_root_table_list.current_table_count++;
}
/*

@ -671,7 +750,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* Complete the initialization of the root table array by examining
* the header of each table
*/
for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
@ -72,7 +72,7 @@ static int no_auto_ssdt;
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
acpi_gbl_root_table_list.size = initial_table_count;
acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
return (acpi_tb_resize_root_table_list());

@ -130,7 +130,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
acpi_gbl_root_table_list.size = initial_table_count;
acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
if (allow_resize) {
acpi_gbl_root_table_list.flags |=

@ -172,6 +172,7 @@ acpi_status acpi_reallocate_root_table(void)
{
struct acpi_table_desc *tables;
acpi_size new_size;
acpi_size current_size;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);

@ -183,10 +184,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
new_size = ((acpi_size) acpi_gbl_root_table_list.count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
/*
* Get the current size of the root table and add the default
* increment to create the new table size.
*/
current_size = (acpi_size)
acpi_gbl_root_table_list.current_table_count *
sizeof(struct acpi_table_desc);
new_size = current_size +
(ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
/* Create new array and copy the old array */
tables = ACPI_ALLOCATE_ZEROED(new_size);

@ -194,10 +202,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_NO_MEMORY);
}
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
/*
* Update the root table descriptor. The new size will be the current
* number of tables plus the increment, independent of the reserved
* size of the original table list.
*/
acpi_gbl_root_table_list.tables = tables;
acpi_gbl_root_table_list.max_table_count =
acpi_gbl_root_table_list.current_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags =
ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;

@ -278,7 +293,8 @@ acpi_get_table_header(char *signature,
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {

@ -341,7 +357,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
ACPI_FUNCTION_TRACE(acpi_unload_table_id);
/* Find table in the global table list */
for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
continue;
}

@ -391,7 +407,8 @@ acpi_get_table_with_size(char *signature,
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {

@ -459,7 +476,7 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Validate index */
if (table_index >= acpi_gbl_root_table_list.count) {
if (table_index >= acpi_gbl_root_table_list.current_table_count) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}

@ -500,16 +517,17 @@ static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
* Load the namespace. The DSDT is required, but any SSDT and PSDT tables
* are optional.
* Load the namespace. The DSDT is required, but any SSDT and
* PSDT tables are optional. Verify the DSDT.
*/
if (!acpi_gbl_root_table_list.count ||
if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),

@ -522,17 +540,35 @@ static acpi_status acpi_tb_load_namespace(void)
goto unlock_and_exit;
}
/* A valid DSDT is required */
/*
* Save the DSDT pointer for simple access. This is the mapped memory
* address. We must take care here because the address of the .Tables
* array can change dynamically as tables are loaded at run-time. Note:
* .Pointer field is not validated until after call to acpi_tb_verify_table.
*/
acpi_gbl_DSDT =
acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT]);
if (ACPI_FAILURE(status)) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
/*
* Optionally copy the entire DSDT to local memory (instead of simply
* mapping it.) There are some BIOSs that corrupt or replace the original
* DSDT, creating the need for this option. Default is FALSE, do not copy
* the DSDT.
*/
if (acpi_gbl_copy_dsdt_locally) {
new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
if (new_dsdt) {
acpi_gbl_DSDT = new_dsdt;
}
}
/*
* Save the original DSDT header for detection of table corruption
* and/or replacement of the DSDT from outside the OS.
*/
ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
sizeof(struct acpi_table_header));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */

@ -545,7 +581,7 @@ static acpi_status acpi_tb_load_namespace(void)
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
@ -134,7 +134,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_PTR_LENGTH);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
"Could not map memory at %8.8X for length %X",
"Could not map memory at 0x%8.8X for length %u",
ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
return_ACPI_STATUS(AE_NO_MEMORY);

@ -159,7 +159,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_WINDOW_SIZE);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
"Could not map memory at %8.8X for length %X",
"Could not map memory at 0x%8.8X for length %u",
physical_address, ACPI_EBDA_WINDOW_SIZE));
return_ACPI_STATUS(AE_NO_MEMORY);

@ -191,7 +191,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
"Could not map memory at %8.8X for length %X",
"Could not map memory at 0x%8.8X for length %u",
ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE));

@ -340,7 +340,7 @@ void *acpi_ut_allocate(acpi_size size,
/* Report allocation error */
ACPI_WARNING((module, line,
"Could not allocate size %X", (u32) size));
"Could not allocate size %u", (u32) size));
return_PTR(NULL);
}
@ -677,16 +677,24 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
acpi_size copy_size;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
/* Copy the entire source object over the destination object */
/*
* Copy the entire source object over the destination object.
* Note: Source can be either an operand object or namespace node.
*/
copy_size = sizeof(union acpi_operand_object);
if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
copy_size = sizeof(struct acpi_namespace_node);
}
ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
sizeof(union acpi_operand_object));
ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */

@ -434,7 +434,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
default:
ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
break;
}

@ -444,8 +444,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
"Large Reference Count (%X) in object %p", count,
object));
"Large Reference Count (0x%X) in object %p",
count, object));
}
}

@ -307,7 +307,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, AE_TYPE);
ACPI_ERROR((AE_INFO,
"Type returned from %s was incorrect: %s, expected Btypes: %X",
"Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
path,
acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));

@ -785,6 +785,7 @@ acpi_status acpi_ut_init_globals(void)
/* Miscellaneous variables */
acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;

@ -205,7 +205,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
/* Guard against multiple allocations of ID to the same location */
if (*owner_id) {
ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
*owner_id));
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}

@ -315,7 +315,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_iD */
if (owner_id == 0) {
ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
return_VOID;
}

@ -341,7 +341,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
"Release of non-allocated OwnerId: %2.2X",
"Release of non-allocated OwnerId: 0x%2.2X",
owner_id + 1));
}

@ -258,7 +258,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
"Thread %p could not acquire Mutex [%X]",
"Thread %p could not acquire Mutex [0x%X]",
ACPI_CAST_PTR(void, this_thread_id), mutex_id));
}

@ -297,7 +297,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
"Mutex [%X] is not acquired, cannot release",
"Mutex [0x%X] is not acquired, cannot release",
mutex_id));
return (AE_NOT_ACQUIRED);

@ -251,7 +251,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
ACPI_ERROR((AE_INFO, "Could not allocate size %X",
ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) buffer_size));
acpi_ut_remove_reference(buffer_desc);
return_PTR(NULL);

@ -303,7 +303,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*/
string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
ACPI_ERROR((AE_INFO, "Could not allocate size %X",
ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) string_size));
acpi_ut_remove_reference(string_desc);
return_PTR(NULL);

@ -533,7 +533,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
*/
ACPI_ERROR((AE_INFO,
"Cannot convert to external object - "
"unsupported Reference Class [%s] %X in object %p",
"unsupported Reference Class [%s] 0x%X in object %p",
acpi_ut_get_reference_name(internal_object),
internal_object->reference.class,
internal_object));

@ -545,7 +545,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
default:
ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
"unsupported type [%s] %X in object %p",
"unsupported type [%s] 0x%X in object %p",
acpi_ut_get_object_type_name(internal_object),
internal_object->common.type, internal_object));
status = AE_TYPE;
@ -69,6 +69,44 @@ static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
};

#ifdef CONFIG_X86
static int set_copy_dsdt(const struct dmi_system_id *id)
{
printk(KERN_NOTICE "%s detected - "
"force copy of DSDT to local memory\n", id->ident);
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}

static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
* Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
.callback = set_copy_dsdt,
.ident = "TOSHIBA Satellite A505",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
},
},
{
.callback = set_copy_dsdt,
.ident = "TOSHIBA Satellite L505D",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
},
},
{}
};
#else
static struct dmi_system_id dsdt_dmi_table[] __initdata = {
{}
};
#endif

/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */

@ -813,6 +851,12 @@ void __init acpi_early_init(void)
acpi_gbl_permanent_mmap = 1;

/*
* If the machine falls into the DMI check table,
* DSDT will be copied to memory
*/
dmi_check_system(dsdt_dmi_table);

status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
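Note: the hunks above wire the new acpi_gbl_copy_dsdt_locally global to a DMI quirk list, so the affected Toshiba models get the DSDT copy without passing acpi=copy_dsdt on the command line. A further quirk entry would follow the same shape; the sketch below is illustrative only, with placeholder vendor and product strings, not a real machine:

	{
		.callback = set_copy_dsdt,
		.ident = "Example machine with a corrupted DSDT",	/* hypothetical entry */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},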
@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
ACPI_NOT_ISR, &event_status);
&event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);

@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
"Invalid GPE 0x%x\n", index));
goto end;
}
result = acpi_get_gpe_status(*handle, index,
ACPI_NOT_ISR, status);
result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);

@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj,
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
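Note: the hunks above adapt in-kernel callers to the reworked GPE interfaces, which drop the ACPI_NOT_ISR/ACPI_ISR flags argument from acpi_get_gpe_status() and acpi_clear_gpe(). A minimal caller against the updated prototypes could look like the following sketch; the wrapper function itself is illustrative and not part of this change:

	/* Illustrative only: query one GPE and clear it if it is set. */
	static acpi_status example_drain_gpe(acpi_handle gpe_device, u32 gpe_number)
	{
		acpi_event_status event_status;
		acpi_status status;

		status = acpi_get_gpe_status(gpe_device, gpe_number, &event_status);
		if (ACPI_FAILURE(status))
			return status;

		if (event_status & ACPI_EVENT_FLAG_SET)
			status = acpi_clear_gpe(gpe_device, gpe_number);

		return status;
	}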
@ -87,7 +87,7 @@
#define AE_NO_GLOBAL_LOCK (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL)
#define AE_ABORT_METHOD (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL)
#define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
#define AE_WAKE_ONLY_GPE (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
#define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)

#define AE_CODE_ENV_MAX 0x001B

@ -206,6 +206,7 @@
#define ACPI_WARNING(plist) acpi_warning plist
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i)

#else

@ -215,6 +216,7 @@
#define ACPI_WARNING(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
#define ACPI_DEBUG_OBJECT(obj,l,i)

#endif /* ACPI_NO_ERROR_MESSAGES */

@ -233,8 +233,8 @@ acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
* Interim function needed for PCI IRQ routing
*/
void
acpi_os_derive_pci_id(acpi_handle rhandle,
acpi_handle chandle, struct acpi_pci_id **pci_id);
acpi_os_derive_pci_id(acpi_handle device,
acpi_handle region, struct acpi_pci_id **pci_id);

/*
* Miscellaneous

@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20100121
#define ACPI_CA_VERSION 0x20100428

#include "actypes.h"
#include "actbl.h"

@ -67,6 +67,8 @@ extern u8 acpi_gbl_leave_wake_gpes_disabled;
extern u8 acpi_gbl_use_default_register_widths;
extern acpi_name acpi_gbl_trace_method_name;
extern u32 acpi_gbl_trace_flags;
extern u8 acpi_gbl_enable_aml_debug_object;
extern u8 acpi_gbl_copy_dsdt_locally;

extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;

@ -164,7 +166,7 @@ acpi_get_devices(const char *HID,
void *context, void **return_value);

acpi_status
acpi_get_name(acpi_handle handle,
acpi_get_name(acpi_handle object,
u32 name_type, struct acpi_buffer *ret_path_ptr);

acpi_status

@ -172,14 +174,12 @@ acpi_get_handle(acpi_handle parent,
acpi_string pathname, acpi_handle * ret_handle);

acpi_status
acpi_attach_data(acpi_handle obj_handle,
acpi_object_handler handler, void *data);
acpi_attach_data(acpi_handle object, acpi_object_handler handler, void *data);

acpi_status acpi_detach_data(acpi_handle object, acpi_object_handler handler);

acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler);

acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data);
acpi_get_data(acpi_handle object, acpi_object_handler handler, void **data);

acpi_status
acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags);

@ -201,7 +201,7 @@ acpi_evaluate_object_typed(acpi_handle object,
acpi_object_type return_type);

acpi_status
acpi_get_object_info(acpi_handle handle,
acpi_get_object_info(acpi_handle object,
struct acpi_device_info **return_buffer);

acpi_status acpi_install_method(u8 *buffer);

@ -283,16 +283,17 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
*/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action);

acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type);
acpi_status
acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type);

acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type);
acpi_status
acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type);

acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags);
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);

acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
u32 gpe_number,
u32 flags, acpi_event_status * event_status);
u32 gpe_number, acpi_event_status *event_status);

acpi_status acpi_disable_all_gpes(void);

@ -315,33 +316,29 @@ acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
void *context);

acpi_status
acpi_get_vendor_resource(acpi_handle device_handle,
acpi_get_vendor_resource(acpi_handle device,
char *name,
struct acpi_vendor_uuid *uuid,
struct acpi_buffer *ret_buffer);

acpi_status
acpi_get_current_resources(acpi_handle device_handle,
struct acpi_buffer *ret_buffer);
acpi_get_current_resources(acpi_handle device, struct acpi_buffer *ret_buffer);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_get_possible_resources(acpi_handle device_handle,
struct acpi_buffer *ret_buffer);
acpi_get_possible_resources(acpi_handle device, struct acpi_buffer *ret_buffer);
#endif

acpi_status
acpi_walk_resources(acpi_handle device_handle,
acpi_walk_resources(acpi_handle device,
char *name,
acpi_walk_resource_callback user_function, void *context);

acpi_status
acpi_set_current_resources(acpi_handle device_handle,
struct acpi_buffer *in_buffer);
acpi_set_current_resources(acpi_handle device, struct acpi_buffer *in_buffer);

acpi_status
acpi_get_irq_routing_table(acpi_handle bus_device_handle,
struct acpi_buffer *ret_buffer);
acpi_get_irq_routing_table(acpi_handle device, struct acpi_buffer *ret_buffer);

acpi_status
acpi_resource_to_address64(struct acpi_resource *resource,

@ -69,6 +69,7 @@
#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
#define ACPI_SIG_SLIC "SLIC" /* Software Licensing Description Table */
#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */

@ -677,6 +678,32 @@ struct acpi_mcfg_allocation {
u32 reserved;
};

/*******************************************************************************
*
* MCHI - Management Controller Host Interface Table
* Version 1
*
* Conforms to "Management Component Transport Protocol (MCTP) Host
* Interface Specification", Revision 1.0.0a, October 13, 2009
*
******************************************************************************/

struct acpi_table_mchi {
struct acpi_table_header header; /* Common ACPI table header */
u8 interface_type;
u8 protocol;
u64 protocol_data;
u8 interrupt_type;
u8 gpe;
u8 pci_device_flag;
u32 global_interrupt;
struct acpi_generic_address control_register;
u8 pci_segment;
u8 pci_bus;
u8 pci_device;
u8 pci_function;
};

/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table

@ -663,44 +663,42 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_MAX 0xFF
#define ACPI_NUM_GPE 256

/* Actions for acpi_set_gpe */

#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1

/* gpe_types for acpi_enable_gpe and acpi_disable_gpe */

#define ACPI_GPE_TYPE_WAKE (u8) 0x01
#define ACPI_GPE_TYPE_RUNTIME (u8) 0x02
#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x03

/*
* GPE info flags - Per GPE
* +-+-+-+---+-+-+-+
* |7|6|5|4:3|2|1|0|
* +-+-+-+---+-+-+-+
* | | | | | | |
* | | | | | | +--- Interrupt type: Edge or Level Triggered
* | | | | | +--- GPE can wake the system
* | | | | +--- Unused
* | | | +--- Type of dispatch -- to method, handler, or none
* | | +--- Unused
* | +--- Unused
* +--- Unused
* +-------+---+-+-+
* | 7:4 |3:2|1|0|
* +-------+---+-+-+
* | | | |
* | | | +--- Interrupt type: edge or level triggered
* | | +----- GPE can wake the system
* | +-------- Type of dispatch:to method, handler, or none
* +-------------- <Reserved>
*/
#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01
#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01
#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00

#define ACPI_GPE_TYPE_MASK (u8) 0x06
#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06
#define ACPI_GPE_TYPE_WAKE (u8) 0x02
#define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */
#define ACPI_GPE_CAN_WAKE (u8) 0x02

#define ACPI_GPE_DISPATCH_MASK (u8) 0x18
#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08
#define ACPI_GPE_DISPATCH_METHOD (u8) 0x10
#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */
#define ACPI_GPE_DISPATCH_MASK (u8) 0x0C
#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x04
#define ACPI_GPE_DISPATCH_METHOD (u8) 0x08
#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00

/*
* Flags for GPE and Lock interfaces
*/
#define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */
#define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */

#define ACPI_NOT_ISR 0x1
#define ACPI_ISR 0x0

@ -953,7 +951,7 @@ acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
#define ACPI_REGION_DEACTIVATE 1

typedef
acpi_status(*acpi_walk_callback) (acpi_handle obj_handle,
acpi_status(*acpi_walk_callback) (acpi_handle object,
u32 nesting_level,
void *context, void **return_value);