Commit b4a34aa6 authored by Corey Minyard, committed by Greg Kroah-Hartman

ipmi: Fix how the lower layers are told to watch for messages



[ Upstream commit c65ea996595005be470fbfa16711deba414fd33b ]

The IPMI driver has a mechanism to tell the lower layers it needs
to watch for messages, commands, and watchdogs (so it doesn't
needlessly poll).  However, it needed an extension: a way to tell
the lower layer what is being waited for, so that layer can set
its timeout appropriately.

The update to the lower layer was also being done once a second
at best, because it happened in the main timeout handler.  However,
when a command is sent and a response message is expected back,
the watch needs to start immediately.  So modify the code to update
the lower layer right away when a watch needs to be enabled;
disabling remains lazy.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Kamlakant Patel <kamlakant.patel@cavium.com>
Stable-dep-of: 6d2555cde291 ("ipmi: fix SSIF not responding under certain cond.")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent ffbbcbd3
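
Before the diff, a minimal sketch of the watch-mask protocol this change introduces may help. It is plain userspace C, not kernel code: the IPMI_WATCH_MASK_* names match the patch, everything else is simplified scaffolding. Enabling a watch bit notifies the lower layer immediately; bits are only dropped later, when the periodic tick recomputes the mask.

/*
 * Sketch of the watch-mask protocol (userspace C, not kernel code).
 * Flag names match the patch; everything else is simplified.
 */
#include <stdio.h>

#define IPMI_WATCH_MASK_INTERNAL	(1 << 0)
#define IPMI_WATCH_MASK_CHECK_MESSAGES	(1 << 1)
#define IPMI_WATCH_MASK_CHECK_WATCHDOG	(1 << 2)
#define IPMI_WATCH_MASK_CHECK_COMMANDS	(1 << 3)

static unsigned int last_watch_mask;

/* Stands in for the lower layer, which would retune its poll timer. */
static void set_need_watch(unsigned int watch_mask)
{
	printf("lower layer now watching: 0x%x\n", watch_mask);
}

/* Eager enable: called the moment something needs watching. */
static void tell_to_watch(unsigned int flags)
{
	if ((last_watch_mask & flags) == flags)
		return;			/* already being watched */
	last_watch_mask |= flags;
	set_need_watch(last_watch_mask);
}

/* Lazy disable: the periodic tick recomputes the mask from scratch. */
static void timeout_tick(unsigned int still_needed)
{
	if (still_needed != last_watch_mask)
		set_need_watch(still_needed);
	last_watch_mask = still_needed;
}

int main(void)
{
	tell_to_watch(IPMI_WATCH_MASK_CHECK_MESSAGES);	/* message sent */
	tell_to_watch(IPMI_WATCH_MASK_CHECK_MESSAGES);	/* no-op, bit already set */
	timeout_tick(IPMI_WATCH_MASK_CHECK_WATCHDOG);	/* reply done; watchdog remains */
	return 0;
}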
drivers/char/ipmi/ipmi_msghandler.c (+85 −34)

@@ -536,9 +536,22 @@ struct ipmi_smi {
 	unsigned int     waiting_events_count; /* How many events in queue? */
 	char             delivering_events;
 	char             event_msg_printed;
+
+	/* How many users are waiting for events? */
 	atomic_t         event_waiters;
 	unsigned int     ticks_to_req_ev;
-	int              last_needs_timer;
+
+	/* How many users are waiting for commands? */
+	atomic_t         command_waiters;
+
+	/* How many users are waiting for watchdogs? */
+	atomic_t         watchdog_waiters;
+
+	/*
+	 * Tells what the lower layer has last been asked to watch for,
+	 * messages and/or watchdogs.  Protected by xmit_msgs_lock.
+	 */
+	unsigned int     last_watch_mask;
 
 	/*
 	 * The event receiver for my BMC, only really used at panic
@@ -1085,6 +1098,29 @@ static int intf_err_seq(struct ipmi_smi *intf,
 	return rv;
 }
 
+/* Must be called with xmit_msgs_lock held. */
+static void smi_tell_to_watch(struct ipmi_smi *intf,
+			      unsigned int flags,
+			      struct ipmi_smi_msg *smi_msg)
+{
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) {
+		if (!smi_msg)
+			return;
+
+		if (!smi_msg->needs_response)
+			return;
+	}
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	if ((intf->last_watch_mask & flags) == flags)
+		return;
+
+	intf->last_watch_mask |= flags;
+	intf->handlers->set_need_watch(intf->send_info,
+				       intf->last_watch_mask);
+}
 
 static void free_user_work(struct work_struct *work)
 {
@@ -1164,8 +1200,9 @@ int ipmi_create_user(unsigned int if_num,
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 	if (handler->ipmi_watchdog_pretimeout) {
 		/* User wants pretimeouts, so make sure to watch for them. */
-		if (atomic_inc_return(&intf->event_waiters) == 1)
-			need_waiter(intf);
+		if (atomic_inc_return(&intf->watchdog_waiters) == 1)
+			smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG,
+					  NULL);
 	}
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 	*user = new_user;
@@ -1239,7 +1276,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
 		user->handler->shutdown(user->handler_data);
 
 	if (user->handler->ipmi_watchdog_pretimeout)
-		atomic_dec(&intf->event_waiters);
+		atomic_dec(&intf->watchdog_waiters);
 
 	if (user->gets_events)
 		atomic_dec(&intf->event_waiters);
@@ -1597,8 +1634,8 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
 		goto out_unlock;
 	}
 
-	if (atomic_inc_return(&intf->event_waiters) == 1)
-		need_waiter(intf);
+	if (atomic_inc_return(&intf->command_waiters) == 1)
+		smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS, NULL);
 
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
@@ -1648,7 +1685,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
 	synchronize_rcu();
 	release_ipmi_user(user, index);
 	while (rcvrs) {
-		atomic_dec(&intf->event_waiters);
+		atomic_dec(&intf->command_waiters);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1765,22 +1802,21 @@ static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
 	return smi_msg;
 }
 
-
 static void smi_send(struct ipmi_smi *intf,
 		     const struct ipmi_smi_handlers *handlers,
 		     struct ipmi_smi_msg *smi_msg, int priority)
 {
 	int run_to_completion = intf->run_to_completion;
+	unsigned long flags = 0;
 
-	if (run_to_completion) {
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-	} else {
-		unsigned long flags;
-
-		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
-	}
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+
+	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, smi_msg);
+
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
 	if (smi_msg)
 		handlers->sender(intf->send_info, smi_msg);
@@ -1978,6 +2014,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
 				ipmb_seq, broadcast,
 				source_address, source_lun);
 
+		/* We will be getting a response in the BMC message queue. */
+		smi_msg->needs_response = true;
+
 		/*
 		 * Copy the message into the recv message data, so we
 		 * can retransmit it later if necessary.
@@ -2165,6 +2204,7 @@ static int i_ipmi_request(struct ipmi_user *user,
 			goto out;
 		}
 	}
+	smi_msg->needs_response = false;
 
 	rcu_read_lock();
 	if (intf->in_shutdown) {
@@ -3386,6 +3426,8 @@ int ipmi_add_smi(struct module *owner,
 	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
 	spin_lock_init(&intf->events_lock);
 	atomic_set(&intf->event_waiters, 0);
+	atomic_set(&intf->watchdog_waiters, 0);
+	atomic_set(&intf->command_waiters, 0);
 	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
@@ -4404,6 +4446,9 @@ static void smi_recv_tasklet(unsigned long val)
 			intf->curr_msg = newmsg;
 		}
 	}
+
+	smi_tell_to_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES, newmsg);
+
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	if (newmsg)
@@ -4531,7 +4576,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 			      struct list_head *timeouts,
 			      unsigned long timeout_period,
 			      int slot, unsigned long *flags,
-			      unsigned int *waiting_msgs)
+			      unsigned int *watch_mask)
 {
 	struct ipmi_recv_msg *msg;
 
@@ -4543,7 +4588,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 
 	if (timeout_period < ent->timeout) {
 		ent->timeout -= timeout_period;
-		(*waiting_msgs)++;
+		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
 		return;
 	}
 
@@ -4562,7 +4607,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
-		(*waiting_msgs)++;
+		*watch_mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
 
 		/*
 		 * Start with the max timer, set to normal timer after
@@ -4614,13 +4659,13 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long        flags;
 	int                  i;
-	unsigned int         waiting_msgs = 0;
+	unsigned int         watch_mask = 0;
 
 	if (!intf->bmc_registered) {
 		kref_get(&intf->refcount);
 		if (!schedule_work(&intf->bmc_reg_work)) {
 			kref_put(&intf->refcount, intf_free);
-			waiting_msgs++;
+			watch_mask |= IPMI_WATCH_MASK_INTERNAL;
 		}
 	}
 
@@ -4640,7 +4685,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
 		check_msg_timeout(intf, &intf->seq_table[i],
 				  &timeouts, timeout_period, i,
				  &flags, &watch_mask);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4671,7 +4716,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
 
 	tasklet_schedule(&intf->recv_tasklet);
 
-	return waiting_msgs;
+	return watch_mask;
 }
 
 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4691,37 +4736,43 @@ static atomic_t stop_operation;
 static void ipmi_timeout(struct timer_list *unused)
 {
 	struct ipmi_smi *intf;
-	int nt = 0, index;
+	unsigned int watch_mask = 0;
+	int index;
+	unsigned long flags;
 
 	if (atomic_read(&stop_operation))
 		return;
 
 	index = srcu_read_lock(&ipmi_interfaces_srcu);
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		int lnt = 0;
-
 		if (atomic_read(&intf->event_waiters)) {
 			intf->ticks_to_req_ev--;
 			if (intf->ticks_to_req_ev == 0) {
 				ipmi_request_event(intf);
 				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 			}
-			lnt++;
+			watch_mask |= IPMI_WATCH_MASK_INTERNAL;
 		}
 
-		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+		if (atomic_read(&intf->watchdog_waiters))
+			watch_mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
 
-		lnt = !!lnt;
-		if (lnt != intf->last_needs_timer &&
-					intf->handlers->set_need_watch)
-			intf->handlers->set_need_watch(intf->send_info, lnt);
-		intf->last_needs_timer = lnt;
+		if (atomic_read(&intf->command_waiters))
+			watch_mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+		watch_mask |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 
-		nt += lnt;
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+		if (watch_mask != intf->last_watch_mask &&
+					intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info,
+						       watch_mask);
+		intf->last_watch_mask = watch_mask;
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 	}
 	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 
-	if (nt)
+	if (watch_mask)
 		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
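One subtlety worth noting in smi_tell_to_watch() above: the fast IPMI_WATCH_MASK_CHECK_MESSAGES watch is requested only when the queued message actually expects a response. A hedged restatement of that gate in standalone C (simplified, not the kernel code itself):

#include <stdbool.h>
#include <stddef.h>

struct msg {
	bool needs_response;	/* set for requests expecting a reply */
};

/* Mirrors the IPMI_WATCH_MASK_CHECK_MESSAGES gate in smi_tell_to_watch(). */
static bool should_watch_messages(const struct msg *m)
{
	/* No message queued, or fire-and-forget: skip the fast watch. */
	return m != NULL && m->needs_response;
}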
drivers/char/ipmi/ipmi_si_intf.c (+4 −1)

@@ -1073,10 +1073,13 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
-static void set_need_watch(void *send_info, bool enable)
+static void set_need_watch(void *send_info, unsigned int watch_mask)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags;
+	int enable;
+
+	enable = !!(watch_mask & ~IPMI_WATCH_MASK_INTERNAL);
 
 	atomic_set(&smi_info->need_watch, enable);
 	spin_lock_irqsave(&smi_info->si_lock, flags);
drivers/char/ipmi/ipmi_ssif.c (+16 −10)

@@ -91,8 +91,8 @@
 /*
  * Timeout for the watch, only used for get flag timer.
  */
-#define SSIF_WATCH_TIMEOUT_MSEC	   100
-#define SSIF_WATCH_TIMEOUT_JIFFIES msecs_to_jiffies(SSIF_WATCH_TIMEOUT_MSEC)
+#define SSIF_WATCH_MSG_TIMEOUT		msecs_to_jiffies(10)
+#define SSIF_WATCH_WATCHDOG_TIMEOUT	msecs_to_jiffies(250)
 
 enum ssif_intf_state {
 	SSIF_NORMAL,
@@ -274,7 +274,7 @@ struct ssif_info {
 	struct timer_list retry_timer;
 	int retries_left;
 
-	bool need_watch;		/* Need to look for flags? */
+	long watch_timeout;		/* Timeout for flags check, 0 if off. */
 	struct timer_list watch_timer;	/* Flag fetch timer. */
 
 	/* Info from SSIF cmd */
@@ -576,9 +576,9 @@ static void watch_timeout(struct timer_list *t)
 		return;
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-	if (ssif_info->need_watch) {
+	if (ssif_info->watch_timeout) {
 		mod_timer(&ssif_info->watch_timer,
-			  jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
+			  jiffies + ssif_info->watch_timeout);
 		if (SSIF_IDLE(ssif_info)) {
 			start_flag_fetch(ssif_info, flags); /* Releases lock */
 			return;
@@ -1151,17 +1151,23 @@ static void request_events(void *send_info)
  * Upper layer is changing the flag saying whether we need to request
  * flags periodically or not.
  */
-static void ssif_set_need_watch(void *send_info, bool enable)
+static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
 {
 	struct ssif_info *ssif_info = (struct ssif_info *) send_info;
 	unsigned long oflags, *flags;
+	long timeout = 0;
+
+	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		timeout = SSIF_WATCH_MSG_TIMEOUT;
+	else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
+		timeout = SSIF_WATCH_WATCHDOG_TIMEOUT;
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-	if (enable != ssif_info->need_watch) {
-		ssif_info->need_watch = enable;
-		if (ssif_info->need_watch)
+	if (timeout != ssif_info->watch_timeout) {
+		ssif_info->watch_timeout = timeout;
+		if (ssif_info->watch_timeout)
 			mod_timer(&ssif_info->watch_timer,
-				  jiffies + SSIF_WATCH_TIMEOUT_JIFFIES);
+				  jiffies + ssif_info->watch_timeout);
 	}
 	ipmi_ssif_unlock_cond(ssif_info, flags);
 }
include/linux/ipmi_smi.h (+29 −7)

@@ -30,6 +30,17 @@ struct device;
 /* Structure for the low-level drivers. */
 typedef struct ipmi_smi *ipmi_smi_t;
 
+/*
+ * Flags for set_check_watch() below.  Tells if the SMI should be
+ * waiting for watchdog timeouts, commands and/or messages.  There is
+ * also an internal flag for the message handler, SMIs should ignore
+ * it.
+ */
+#define IPMI_WATCH_MASK_INTERNAL	(1 << 0)
+#define IPMI_WATCH_MASK_CHECK_MESSAGES	(1 << 1)
+#define IPMI_WATCH_MASK_CHECK_WATCHDOG	(1 << 2)
+#define IPMI_WATCH_MASK_CHECK_COMMANDS	(1 << 3)
+
 /*
  * Messages to/from the lower layer.  The smi interface will take one
  * of these to send. After the send has occurred and a response has
@@ -55,8 +66,16 @@ struct ipmi_smi_msg {
 	int           rsp_size;
 	unsigned char rsp[IPMI_MAX_MSG_LENGTH];
 
-	/* Will be called when the system is done with the message
-	   (presumably to free it). */
+	/*
+	 * There should be a response message coming back in the BMC
+	 * message queue.
+	 */
+	bool needs_response;
+
+	/*
+	 * Will be called when the system is done with the message
+	 * (presumably to free it).
+	 */
 	void (*done)(struct ipmi_smi_msg *msg);
 };
 
@@ -105,12 +124,15 @@ struct ipmi_smi_handlers {
 
 	/*
 	 * Called by the upper layer when some user requires that the
-	 * interface watch for events, received messages, watchdog
-	 * pretimeouts, or not.  Used by the SMI to know if it should
-	 * watch for these.  This may be NULL if the SMI does not
-	 * implement it.
+	 * interface watch for received messages and watchdog
+	 * pretimeouts (basically do a "Get Flags", or not.  Used by
+	 * the SMI to know if it should watch for these.  This may be
+	 * NULL if the SMI does not implement it.  watch_mask is from
+	 * IPMI_WATCH_MASK_xxx above.  The interface should run slower
+	 * timeouts for just watchdog checking or faster timeouts when
+	 * waiting for the message queue.
	 */
-	void (*set_need_watch)(void *send_info, bool enable);
+	void (*set_need_watch)(void *send_info, unsigned int watch_mask);
 
 	/*
 	 * Called when flushing all pending messages.
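
To make the contract in the comment above concrete, here is a hypothetical lower-layer implementation of the new callback signature. The my_driver type and the interval values are illustrative assumptions, not part of this commit; the SSIF driver earlier in the commit uses 10 ms and 250 ms for the corresponding cases.

/* Mask bits as defined in ipmi_smi.h above. */
#define IPMI_WATCH_MASK_INTERNAL	(1 << 0)
#define IPMI_WATCH_MASK_CHECK_MESSAGES	(1 << 1)

struct my_driver {
	unsigned long poll_interval_ms;	/* 0 = polling off */
};

static void my_set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct my_driver *drv = send_info;

	if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
		drv->poll_interval_ms = 10;	/* a response is due soon */
	else if (watch_mask & ~IPMI_WATCH_MASK_INTERNAL)
		drv->poll_interval_ms = 250;	/* watchdog/commands only */
	else
		drv->poll_interval_ms = 0;	/* nothing left to watch */
}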