Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9dce0e95 authored by Len Brown
Browse files

Pull acpica into release branch

parents f1b2ad5d 967440e3
Loading
Loading
Loading
Loading
+0 −30
Original line number Diff line number Diff line
@@ -125,37 +125,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
		if (info->table_desc->pointer->revision == 1) {
			node->flags |= ANOBJ_DATA_WIDTH_32;
		}
#ifdef ACPI_INIT_PARSE_METHODS
		/*
		 * Note 11/2005: Removed this code to parse all methods during table
		 * load because it causes problems if there are any errors during the
		 * parse. Also, it seems like overkill and we probably don't want to
		 * abort a table load because of an issue with a single method.
		 */

		/*
		 * Print a dot for each method unless we are going to print
		 * the entire pathname
		 */
		if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
			ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
		}

		/*
		 * Always parse methods to detect errors, we will delete
		 * the parse tree below
		 */
		status = acpi_ds_parse_method(obj_handle);
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Method %p [%4.4s] - parse failure, %s",
				    obj_handle,
				    acpi_ut_get_node_name(obj_handle),
				    acpi_format_exception(status)));

			/* This parse failed, but we will continue parsing more methods */
		}
#endif
		info->method_count++;
		break;

+138 −192
Original line number Diff line number Diff line
@@ -52,6 +52,10 @@
#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
@@ -67,6 +71,7 @@ ACPI_MODULE_NAME("dsmethod")
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
@@ -111,13 +116,53 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  obj_desc            - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_NAME(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		/*
		 * Fix: must delete the just-created internal mutex object on
		 * OS mutex creation failure, otherwise mutex_desc is leaked
		 * (the original returned without releasing it).
		 */
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	/*
	 * The method's declared SyncLevel seeds the mutex SyncLevel; the
	 * mutex is then attached to the method object so subsequent
	 * invocations reuse it (creation is deferred until first execution).
	 */
	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              calling_method_node - Caller of this method (if non-null)
 *              walk_state          - current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
@@ -130,7 +175,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_namespace_node * calling_method_node)
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

@@ -149,35 +194,80 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
	}

	/*
	 * If there is a concurrency limit on this method, we need to
	 * obtain a unit from the method semaphore.
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.semaphore) {
	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
		/*
		 * Allow recursive method calls, up to the reentrancy/concurrency
		 * limit imposed by the SERIALIZED rule and the sync_level method
		 * parameter.
		 *
		 * The point of this code is to avoid permanently blocking a
		 * thread that is making recursive method calls.
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (method_node == calling_method_node) {
			if (obj_desc->method.thread_count >=
			    obj_desc->method.concurrency) {
				return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Get a unit from the method semaphore. This releases the
		 * interpreter if we block (then reacquires it)
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.owner_thread ||
		    (walk_state->thread !=
		     obj_desc->method.mutex->mutex.owner_thread)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
		    acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.owner_thread =
				    walk_state->thread;
				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
@@ -200,10 +290,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
	return_ACPI_STATUS(status);

      cleanup:
	/* On error, must signal the method semaphore if present */
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.semaphore) {
		(void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
@@ -253,10 +343,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on concurrency semaphore */
	/* Init for new method, possibly wait on method mutex */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state->method_node);
						this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
@@ -478,6 +568,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
@@ -503,26 +595,21 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
	}

	/*
	 * Lock the parser while we terminate this method.
	 * If this is the last thread executing the method,
	 * we have additional cleanup to perform
	 * If method is serialized, release the mutex and restore the
	 * current sync level for this thread
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/* Signal completion of the execution of this method if necessary */
	if (method_desc->method.mutex) {

	if (method_desc->method.semaphore) {
		status =
		    acpi_os_signal_semaphore(method_desc->method.semaphore, 1);
		if (ACPI_FAILURE(status)) {
		/* Acquisition Depth handles recursive calls */

			/* Ignore error and continue */
		method_desc->method.mutex->mutex.acquisition_depth--;
		if (!method_desc->method.mutex->mutex.acquisition_depth) {
			walk_state->thread->current_sync_level =
			    method_desc->method.mutex->mutex.
			    original_sync_level;

			ACPI_EXCEPTION((AE_INFO, status,
					"Could not signal method semaphore"));
			acpi_os_release_mutex(method_desc->method.mutex->mutex.
					      os_mutex);
		}
	}

@@ -537,7 +624,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,

		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
		if (ACPI_FAILURE(status)) {
			goto exit;
			return_VOID;
		}

		/*
@@ -587,11 +674,9 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
		 * This code is here because we must wait until the last thread exits
		 * before creating the synchronization semaphore.
		 */
		if ((method_desc->method.concurrency == 1) &&
		    (!method_desc->method.semaphore)) {
			status = acpi_os_create_semaphore(1, 1,
							  &method_desc->method.
							  semaphore);
		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
		    && (!method_desc->method.mutex)) {
			status = acpi_ds_create_method_mutex(method_desc);
		}

		/* No more threads, we can free the owner_id */
@@ -599,144 +684,5 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
		acpi_ut_release_owner_id(&method_desc->method.owner_id);
	}

      exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
	return_VOID;
}

#ifdef ACPI_INIT_PARSE_METHODS
	/*
	 * Note 11/2005: Removed this code to parse all methods during table
	 * load because it causes problems if there are any errors during the
	 * parse. Also, it seems like overkill and we probably don't want to
	 * abort a table load because of an issue with a single method.
	 */

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_parse_method
 *
 * PARAMETERS:  Node        - Method node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse the AML that is associated with the method.
 *
 * MUTEX:       Assumes parser is locked
 *
 ******************************************************************************/

/*
 * Parse the AML body of a control method during table load. NOTE: this
 * entire function is compiled only under ACPI_INIT_PARSE_METHODS; the
 * comment above in this file explains it was removed (11/2005) because a
 * parse error here could abort an entire table load.
 */
acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
{
	acpi_status status;
	union acpi_operand_object *obj_desc;
	union acpi_parse_object *op;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node);

	/* Parameter Validation */

	if (!node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Parsing [%4.4s] **** NamedObj=%p\n",
			  acpi_ut_get_node_name(node), node));

	/* Extract the method object from the method Node */

	obj_desc = acpi_ns_get_attached_object(node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Create a mutex for the method if there is a concurrency limit */

	/*
	 * Pre-mutex-rework scheme: a counting semaphore (initial count ==
	 * concurrency limit) throttles concurrent executions of the method.
	 * Created lazily; skipped when concurrency is unlimited.
	 */
	if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
	    (!obj_desc->method.semaphore)) {
		status = acpi_os_create_semaphore(obj_desc->method.concurrency,
						  obj_desc->method.concurrency,
						  &obj_desc->method.semaphore);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Allocate a new parser op to be the root of the parsed
	 * method tree
	 */
	op = acpi_ps_alloc_op(AML_METHOD_OP);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Init new op with the method name and pointer back to the Node */

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/*
	 * Get a new owner_id for objects created by this method. Namespace
	 * objects (such as Operation Regions) can be created during the
	 * first pass parse.
	 */
	status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
				      NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup2;
	}

	/* Last arg (1) selects the load pass; walk state is consumed by the parse */

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 1);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup2;
	}

	/*
	 * Parse the method, first pass
	 *
	 * The first pass load is where newly declared named objects are added into
	 * the namespace.  Actual evaluation of the named objects (what would be
	 * called a "second pass") happens during the actual execution of the
	 * method so that operands to the named objects can take on dynamic
	 * run-time values.
	 */
	status = acpi_ps_parse_aml(walk_state);
	if (ACPI_FAILURE(status)) {
		goto cleanup2;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n",
			  acpi_ut_get_node_name(node), node, op));

	/*
	 * Delete the parse tree. We simply re-parse the method for every
	 * execution since there isn't much overhead (compared to keeping lots
	 * of parse trees around)
	 */
	acpi_ns_delete_namespace_subtree(node);
	acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);

	/* Success falls through: owner_id and parse tree are released in all cases */

      cleanup2:
	acpi_ut_release_owner_id(&obj_desc->method.owner_id);

      cleanup:
	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}
#endif
+1 −3
Original line number Diff line number Diff line
@@ -472,7 +472,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
			    acpi_ds_result_push(walk_state->result_obj,
						walk_state);
		}

		break;

	default:
@@ -510,6 +509,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
						  "Method Reference in a Package, Op=%p\n",
						  op));

				op->common.node =
				    (struct acpi_namespace_node *)op->asl.value.
				    arg->asl.node->object;
@@ -670,7 +670,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)

				status = acpi_ds_result_stack_pop(walk_state);
			}

			break;

		case AML_TYPE_UNDEFINED:
@@ -708,7 +707,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
	 * Check if we just completed the evaluation of a
	 * conditional predicate
	 */

	if ((ACPI_SUCCESS(status)) &&
	    (walk_state->control_state) &&
	    (walk_state->control_state->common.state ==
+23 −26
Original line number Diff line number Diff line
@@ -219,7 +219,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
			 * Note: silently change the type here. On the second pass, we will report
			 * a warning
			 */

			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
					  path,
@@ -242,7 +241,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
		break;

	default:

		/*
		 * For all other named opcodes, we will enter the name into
		 * the namespace.
@@ -259,7 +257,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
		 *       buffer_field, or Package), the name of the object is already
		 *       in the namespace.
		 */

		if (walk_state->deferred_node) {

			/* This name is already in the namespace, get the node */
@@ -327,12 +324,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
							    (status);
						}
					}

					status = AE_OK;
				}
			}

			if (ACPI_FAILURE(status)) {

				ACPI_ERROR_NAMESPACE(path, status);
				return_ACPI_STATUS(status);
			}
@@ -434,9 +431,13 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
			status =
			    acpi_ex_create_region(op->named.data,
						  op->named.length,
						  (acpi_adr_space_type)
						  ((op->common.value.arg)->
						   common.value.integer),
						  (acpi_adr_space_type) ((op->
									  common.
									  value.
									  arg)->
									 common.
									 value.
									 integer),
						  walk_state);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
@@ -499,6 +500,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
								  length,
								  walk_state);
				}

				walk_state->operands[0] = NULL;
				walk_state->num_operands = 0;

@@ -570,7 +572,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
			if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
			    (walk_state->op_info->class == AML_CLASS_CONTROL)) {

				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
						  "Begin/EXEC: %s (fl %8.8X)\n",
						  walk_state->op_info->name,
@@ -602,7 +603,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
		} else {
			/* Get name from the op */

			buffer_ptr = (char *)&op->named.name;
			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
		}
	} else {
		/* Get the namestring from the raw AML */
@@ -629,7 +630,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
		break;

	case AML_INT_NAMEPATH_OP:

		/*
		 * The name_path is an object reference to an existing object.
		 * Don't enter the name into the namespace, but look it up
@@ -642,7 +642,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
		break;

	case AML_SCOPE_OP:

		/*
		 * The Path is an object reference to an existing object.
		 * Don't enter the name into the namespace, but look it up
@@ -664,6 +663,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
#endif
			return_ACPI_STATUS(status);
		}

		/*
		 * We must check to make sure that the target is
		 * one of the opcodes that actually opens a scope
@@ -689,7 +689,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
			 *  Name (DEB, 0)
			 *  Scope (DEB) { ... }
			 */

			ACPI_WARNING((AE_INFO,
				      "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
				      buffer_ptr,
@@ -729,8 +728,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
				if (ACPI_FAILURE(status)) {
					return_ACPI_STATUS(status);
				}

			}

			return_ACPI_STATUS(AE_OK);
		}

@@ -787,7 +786,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
	 * can get it again quickly when this scope is closed
	 */
	op->common.node = node;

	return_ACPI_STATUS(status);
}

@@ -922,7 +920,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
#ifndef ACPI_NO_METHOD_EXECUTION

	case AML_TYPE_CREATE_FIELD:

		/*
		 * Create the field object, but the field buffer and index must
		 * be evaluated later during the execution phase
@@ -931,7 +928,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
		break;

	case AML_TYPE_NAMED_FIELD:

		/*
		 * If we are executing a method, initialize the field
		 */
@@ -1051,6 +1047,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
			 * argument is the space_id. (We must save the address of the
			 * AML of the address and length operands)
			 */

			/*
			 * If we have a valid region, initialize it
			 * Namespace is NOT locked at this point.
+5 −9
Original line number Diff line number Diff line
@@ -382,7 +382,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	acpi_cpu_flags hw_flags;
	acpi_native_uint i;
	acpi_native_uint j;

@@ -394,8 +393,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
		return (int_status);
	}

	/* We need to hold the GPE lock now, hardware lock in the loop */

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE registers
	 * are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */
@@ -413,8 +415,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)

			gpe_register_info = &gpe_block->register_info[i];

			hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);

			/* Read the Status Register */

			status =
@@ -423,8 +423,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
						   &gpe_register_info->
						   status_address);
			if (ACPI_FAILURE(status)) {
				acpi_os_release_lock(acpi_gbl_hardware_lock,
						     hw_flags);
				goto unlock_and_exit;
			}

@@ -435,8 +433,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
						   &enable_reg,
						   &gpe_register_info->
						   enable_address);
			acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);

			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}
Loading