5 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6 * Philip Kelleher <pjk1939@linux.vnet.ibm.com>
8 * (C) Copyright 2013 IBM Corporation
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include <linux/completion.h>
26 #include <linux/slab.h>
28 #include "rsxx_priv.h"
30 #define CREG_TIMEOUT_MSEC 10000
/*
 * Completion callback invoked when a CREG command finishes (successfully,
 * with an error status, on timeout, or on cancellation).
 */
32 typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
/* Queued CREG command; linked on creg_ctrl.queue until issued. */
37 struct list_head list;
/* Slab cache for struct creg_cmd allocations (created in rsxx_creg_init). */
48 static struct kmem_cache *creg_cmd_pool;
51 /*------------ Private Functions --------------*/
/*
 * Resolve host endianness to a compile-time 0/1 flag used by the
 * copy_to/from_creg_data helpers to decide on byte swapping.
 *
 * NOTE(review): "LITTLE_ENDIAN" is a name also defined by some system
 * headers (e.g. <endian.h> in userspace); a driver-private name such as
 * CREG_LITTLE_ENDIAN would avoid any collision -- TODO confirm no
 * clash with in-tree headers.
 */
53 #if defined(__LITTLE_ENDIAN)
54 #define LITTLE_ENDIAN 1
55 #elif defined(__BIG_ENDIAN)
56 #define LITTLE_ENDIAN 0
/* NOTE(review): typo below -- "endianess" should read "endianness". */
58 #error Unknown endianess!!! Aborting...
/*
 * Copy a command's payload into the hardware CREG data window, one
 * 32-bit word per iteration.  cnt8 is a byte count, hence the -= 4
 * step; the loop intentionally tolerates a trailing partial word
 * (cnt8 not a multiple of 4) by writing a full final word.
 */
61 static void copy_to_creg_data(struct rsxx_cardinfo *card,
69 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
71 * Firmware implementation makes it necessary to byte swap on
72 * little endian processors.
74 if (LITTLE_ENDIAN && stream)
/* Byte-stream data: write big-endian so firmware sees bytes in order. */
75 iowrite32be(data[i], card->regmap + CREG_DATA(i));
77 iowrite32(data[i], card->regmap + CREG_DATA(i));
/*
 * Mirror of copy_to_creg_data: read a command's result out of the
 * hardware CREG data window into the caller's buffer, one 32-bit word
 * at a time (cnt8 is a byte count).
 */
82 static void copy_from_creg_data(struct rsxx_cardinfo *card,
90 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
92 * Firmware implementation makes it necessary to byte swap on
93 * little endian processors.
95 if (LITTLE_ENDIAN && stream)
/* Byte-stream data arrives big-endian from the firmware. */
96 data[i] = ioread32be(card->regmap + CREG_DATA(i));
98 data[i] = ioread32(card->regmap + CREG_DATA(i));
/*
 * Program the CREG hardware registers for one command and kick it off.
 * Write commands stage their payload into the data window first; the
 * final write of the op code to CREG_CMD (which carries the valid bit)
 * starts execution, so register order here matters.
 */
102 static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
104 iowrite32(cmd->addr, card->regmap + CREG_ADD);
105 iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
107 if (cmd->op == CREG_OP_WRITE) {
109 copy_to_creg_data(card, cmd->cnt8,
110 cmd->buf, cmd->stream);
113 /* Setting the valid bit will kick off the command. */
114 iowrite32(cmd->op, card->regmap + CREG_CMD);
/*
 * Dequeue the next pending CREG command and issue it to hardware.
 * Must be called with creg_ctrl.lock held (all callers in this file --
 * creg_queue_cmd, creg_cmd_done, creg_cmd_timed_out -- do so).
 * No-op if a command is already in flight or the queue is empty.
 */
117 static void creg_kick_queue(struct rsxx_cardinfo *card)
119 if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
/* Claim the hardware before dropping the command off the queue. */
122 card->creg_ctrl.active = 1;
123 card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
124 struct creg_cmd, list);
125 list_del(&card->creg_ctrl.active_cmd->list);
126 card->creg_ctrl.q_depth--;
129 * We have to set the timer before we push the new command. Otherwise,
130 * we could create a race condition that would occur if the timer
131 * was not canceled, and expired after the new command was pushed,
132 * but before the command was issued to hardware.
134 mod_timer(&card->creg_ctrl.cmd_timer,
135 jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
137 creg_issue_cmd(card, card->creg_ctrl.active_cmd);
/*
 * Allocate, initialize and enqueue an asynchronous CREG command, then
 * try to start it via creg_kick_queue().  The callback (with cb_private)
 * is invoked from the completion/timeout/reset paths.  Rejected when the
 * card is halted, a CREG reset is in progress, or cnt8 exceeds the
 * hardware data-window size (MAX_CREG_DATA8).
 */
140 static int creg_queue_cmd(struct rsxx_cardinfo *card,
146 creg_cmd_cb callback,
149 struct creg_cmd *cmd;
151 /* Don't queue stuff up if we're halted. */
152 if (unlikely(card->halt))
/* creg_reset() sets this flag while it cancels outstanding commands. */
155 if (card->creg_ctrl.reset)
158 if (cnt8 > MAX_CREG_DATA8)
161 cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
165 INIT_LIST_HEAD(&cmd->list);
171 cmd->stream = stream;
173 cmd->cb_private = cb_private;
/* _bh lock: the completion/timeout paths run in softirq context. */
176 spin_lock_bh(&card->creg_ctrl.lock);
177 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
178 card->creg_ctrl.q_depth++;
179 creg_kick_queue(card);
180 spin_unlock_bh(&card->creg_ctrl.lock);
/*
 * Timer callback (armed in creg_kick_queue, registered in
 * rsxx_creg_setup): the active CREG command did not complete within
 * CREG_TIMEOUT_MSEC.  Pops the active command, completes it with
 * -ETIMEDOUT, and restarts the queue.  Plain spin_lock (not _bh) is
 * used because this already runs in timer/softirq context.
 */
185 static void creg_cmd_timed_out(unsigned long data)
187 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
188 struct creg_cmd *cmd;
190 spin_lock(&card->creg_ctrl.lock);
191 cmd = card->creg_ctrl.active_cmd;
192 card->creg_ctrl.active_cmd = NULL;
193 spin_unlock(&card->creg_ctrl.lock);
/* Raced with the completion path: nothing left to time out. */
196 card->creg_ctrl.creg_stats.creg_timeout++;
197 dev_warn(CARD_TO_DEV(card),
198 "No active command associated with timeout!\n");
203 cmd->cb(card, cmd, -ETIMEDOUT);
205 kmem_cache_free(creg_cmd_pool, cmd);
/* Allow the next queued command to be issued. */
208 spin_lock(&card->creg_ctrl.lock);
209 card->creg_ctrl.active = 0;
210 creg_kick_queue(card);
211 spin_unlock(&card->creg_ctrl.lock);
/*
 * Work-queue handler (INIT_WORK in rsxx_creg_setup) run when the
 * hardware signals CREG completion.  Cancels the timeout timer, pops
 * the active command, validates the hardware status, copies read data
 * back to the caller's buffer, invokes the completion callback, frees
 * the command, and kicks the queue for the next one.
 */
215 static void creg_cmd_done(struct work_struct *work)
217 struct rsxx_cardinfo *card;
218 struct creg_cmd *cmd;
221 card = container_of(work, struct rsxx_cardinfo,
222 creg_ctrl.done_work);
225 * The timer could not be cancelled for some reason,
226 * race to pop the active command.
228 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
229 card->creg_ctrl.creg_stats.failed_cancel_timer++;
231 spin_lock_bh(&card->creg_ctrl.lock);
232 cmd = card->creg_ctrl.active_cmd;
233 card->creg_ctrl.active_cmd = NULL;
234 spin_unlock_bh(&card->creg_ctrl.lock);
/* Timeout (or reset) path already consumed the active command. */
237 dev_err(CARD_TO_DEV(card),
238 "Spurious creg interrupt!\n");
/* Latch the raw hardware status both for stats and for the caller. */
242 card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
243 cmd->status = card->creg_ctrl.creg_stats.stat;
244 if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
245 dev_err(CARD_TO_DEV(card),
246 "Invalid status on creg command\n");
248 * At this point we're probably reading garbage from HW. Don't
249 * do anything else that could mess up the system and let
250 * the sync function return an error.
254 } else if (cmd->status & CREG_STAT_ERROR) {
/* NOTE(review): double parentheses below are redundant (reads as an
 * accidental "==" vs "=" guard); a single pair would be cleaner. */
258 if ((cmd->op == CREG_OP_READ)) {
259 unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
261 /* Paranoid Sanity Checks */
263 dev_err(CARD_TO_DEV(card),
264 "Buffer not given for read.\n");
/* Hardware must echo back the byte count we asked for. */
268 if (cnt8 != cmd->cnt8) {
269 dev_err(CARD_TO_DEV(card),
275 copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
280 cmd->cb(card, cmd, st);
282 kmem_cache_free(creg_cmd_pool, cmd);
284 spin_lock_bh(&card->creg_ctrl.lock);
285 card->creg_ctrl.active = 0;
286 creg_kick_queue(card);
287 spin_unlock_bh(&card->creg_ctrl.lock);
/*
 * Recover the CREG interface after a fault: block new submissions
 * (creg_ctrl.reset, checked in creg_queue_cmd), mask CREG/event
 * interrupts, cancel every queued command and the active one with
 * -ECANCELED, then re-enable interrupts and clear the reset flag.
 */
290 static void creg_reset(struct rsxx_cardinfo *card)
292 struct creg_cmd *cmd = NULL;
293 struct creg_cmd *tmp;
297 * mutex_trylock is used here because if reset_lock is taken then a
298 * reset is already happening. So, we can just go ahead and return.
300 if (!mutex_trylock(&card->creg_ctrl.reset_lock))
303 card->creg_ctrl.reset = 1;
304 spin_lock_irqsave(&card->irq_lock, flags);
305 rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
306 spin_unlock_irqrestore(&card->irq_lock, flags);
308 dev_warn(CARD_TO_DEV(card),
309 "Resetting creg interface for recovery\n");
311 /* Cancel outstanding commands */
312 spin_lock_bh(&card->creg_ctrl.lock);
313 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
314 list_del(&cmd->list);
315 card->creg_ctrl.q_depth--;
317 cmd->cb(card, cmd, -ECANCELED);
318 kmem_cache_free(creg_cmd_pool, cmd);
/* Also retire the in-flight command, if any, and stop its timer. */
321 cmd = card->creg_ctrl.active_cmd;
322 card->creg_ctrl.active_cmd = NULL;
324 if (timer_pending(&card->creg_ctrl.cmd_timer))
325 del_timer_sync(&card->creg_ctrl.cmd_timer);
328 cmd->cb(card, cmd, -ECANCELED);
329 kmem_cache_free(creg_cmd_pool, cmd);
331 card->creg_ctrl.active = 0;
333 spin_unlock_bh(&card->creg_ctrl.lock);
335 card->creg_ctrl.reset = 0;
336 spin_lock_irqsave(&card->irq_lock, flags);
337 rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
338 spin_unlock_irqrestore(&card->irq_lock, flags);
340 mutex_unlock(&card->creg_ctrl.reset_lock);
343 /* Used for synchronous accesses */
/*
 * Glue between the async CREG machinery and a sleeping caller in
 * __issue_creg_rw(): carries the completion to signal plus the final
 * status back to the waiter.
 */
344 struct creg_completion {
345 struct completion *cmd_done;
/*
 * Completion callback used by the synchronous path: record the driver
 * status (st) and the raw hardware status, then wake the waiter in
 * __issue_creg_rw().
 */
350 static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
351 struct creg_cmd *cmd,
354 struct creg_completion *cmd_completion;
356 cmd_completion = cmd->cb_private;
357 BUG_ON(!cmd_completion);
359 cmd_completion->st = st;
360 cmd_completion->creg_status = cmd->status;
361 complete(cmd_completion->cmd_done);
/*
 * Synchronously issue one CREG read/write of at most MAX_CREG_DATA8
 * bytes: queue it with creg_cmd_done_cb as the callback, then sleep on
 * an on-stack completion.  On success *hw_stat receives the raw
 * hardware status.  The wait timeout scales with queue depth so every
 * command ahead of ours gets its own CREG_TIMEOUT_MSEC budget.
 */
364 static int __issue_creg_rw(struct rsxx_cardinfo *card,
370 unsigned int *hw_stat)
372 DECLARE_COMPLETION_ONSTACK(cmd_done);
373 struct creg_completion completion;
374 unsigned long timeout;
377 completion.cmd_done = &cmd_done;
379 completion.creg_status = 0;
381 st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
387 * This timeout is necessary for unresponsive hardware. The additional
388 * 20 seconds to used to guarantee that each cregs requests has time to
391 timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
392 card->creg_ctrl.q_depth + 20000);
395 * The creg interface is guaranteed to complete. It has a timeout
396 * mechanism that will kick in if hardware does not respond.
398 st = wait_for_completion_timeout(completion.cmd_done, timeout);
401 * This is really bad, because the kernel timer did not
402 * expire and notify us of a timeout!
404 dev_crit(CARD_TO_DEV(card),
405 "cregs timer failed\n");
410 *hw_stat = completion.creg_status;
/* completion.st carries the per-command error from the callback. */
413 dev_warn(CARD_TO_DEV(card),
414 "creg command failed(%d x%08x)\n",
415 completion.st, addr);
416 return completion.st;
/*
 * Split an arbitrarily sized transfer into MAX_CREG_DATA8-byte chunks
 * and issue each chunk through __issue_creg_rw(), advancing the data
 * pointer between chunks.  'read' selects CREG_OP_READ vs CREG_OP_WRITE.
 */
422 static int issue_creg_rw(struct rsxx_cardinfo *card,
429 unsigned int hw_stat;
434 op = read ? CREG_OP_READ : CREG_OP_WRITE;
/* Clamp each chunk to the hardware data-window size. */
437 xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
439 st = __issue_creg_rw(card, op, addr, xfer,
440 data, stream, &hw_stat);
444 data = (char *)data + xfer;
452 /* ---------------------------- Public API ---------------------------------- */
/* Public API: synchronous CREG write (last arg 0 = write). */
453 int rsxx_creg_write(struct rsxx_cardinfo *card,
459 return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
/* Public API: synchronous CREG read (last arg 1 = read). */
462 int rsxx_creg_read(struct rsxx_cardinfo *card,
468 return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
/* Read the card's current state word from CREG_ADD_CARD_STATE. */
471 int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
473 return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
474 sizeof(*state), state, 0);
/*
 * Report the card's capacity in bytes: the hardware returns a block
 * count, which is scaled by RSXX_HW_BLK_SIZE (the u64 cast guards
 * against overflow in the multiply).
 */
477 int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
482 st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
483 sizeof(size), &size, 0);
487 *size8 = (u64)size * RSXX_HW_BLK_SIZE;
/* Read the number of DMA targets from CREG_ADD_NUM_TARGETS. */
491 int rsxx_get_num_targets(struct rsxx_cardinfo *card,
492 unsigned int *n_targets)
494 return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
495 sizeof(*n_targets), n_targets, 0);
/* Read the card's capability bitmask from CREG_ADD_CAPABILITIES. */
498 int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
501 return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
502 sizeof(*capabilities), capabilities, 0);
/* Send a control command to the card by writing CREG_ADD_CARD_CMD. */
505 int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
507 return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
508 sizeof(cmd), &cmd, 0);
512 /*----------------- HW Log Functions -------------------*/
/*
 * Emit one firmware log message through the kernel log, mapped to the
 * dev_* printk level parsed from the message's "<#>" prefix.  The
 * "%.*s" format bounds the print to 'len' bytes since the buffer is
 * not necessarily NUL-terminated at that point.
 */
513 static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
518 * New messages start with "<#>", where # is the log level. Messages
519 * that extend past the log buffer will use the previous level
521 if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
523 str += 3; /* Skip past the log level. */
529 dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
532 dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
535 dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
538 dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
541 dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
544 dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
547 dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
550 dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
/* Unknown/absent level: fall back to info. */
553 dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
559 * The substrncpy function copies the src string (which includes the
560 * terminating '\0' character), up to the count into the dest pointer.
561 * Returns the number of bytes copied to dest.
/*
 * Unlike strncpy(), the copy stops at (and includes) the first '\0',
 * so the return value tells the caller whether a full message was
 * consumed (last byte copied is '\0') or it was truncated by 'count'.
 */
563 static int substrncpy(char *dest, const char *src, int count)
575 return max_cnt - count;
/*
 * Completion callback for the asynchronous HW-log read issued by
 * rsxx_read_hw_log().  Walks the returned buffer, accumulating bytes
 * into card->log.buf and flushing a message to hw_log_msg() whenever a
 * terminating '\0' is seen or the accumulation buffer fills.  If the
 * hardware reports more log data pending, another read is chained.
 */
579 static void read_hw_log_done(struct rsxx_cardinfo *card,
580 struct creg_cmd *cmd,
592 /* Failed getting the log message */
596 while (off < cmd->cnt8) {
597 log_str = &card->log.buf[card->log.buf_len];
/* Never copy past the returned data nor past the log buffer. */
598 cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
599 len = substrncpy(log_str, &buf[off], cnt);
602 card->log.buf_len += len;
605 * Flush the log if we've hit the end of a message or if we've
606 * run out of buffer space.
608 if ((log_str[len - 1] == '\0') ||
609 (card->log.buf_len == LOG_BUF_SIZE8)) {
610 if (card->log.buf_len != 1) /* Don't log blank lines. */
611 hw_log_msg(card, card->log.buf,
613 card->log.buf_len = 0;
/* More firmware log data waiting: issue the next read. */
618 if (cmd->status & CREG_STAT_LOG_PENDING)
619 rsxx_read_hw_log(card);
/*
 * Kick off an asynchronous read of the firmware log into card->log.tmp
 * (byte-stream mode); read_hw_log_done() parses and prints the result.
 */
622 int rsxx_read_hw_log(struct rsxx_cardinfo *card)
626 st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
627 sizeof(card->log.tmp), card->log.tmp,
628 1, read_hw_log_done, NULL);
630 dev_err(CARD_TO_DEV(card),
631 "Failed getting log text\n");
636 /*-------------- IOCTL REG Access ------------------*/
/*
 * ioctl helper: translate a userspace rsxx_reg_access request into a
 * single synchronous CREG transfer; the hardware status is returned
 * to the caller via cmd->stat.
 */
637 static int issue_reg_cmd(struct rsxx_cardinfo *card,
638 struct rsxx_reg_access *cmd,
641 unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
643 return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
644 cmd->stream, &cmd->stat);
/*
 * ioctl entry point for raw register access: copy the request in from
 * userspace, bound-check the transfer size against RSXX_MAX_REG_CNT
 * (untrusted input), perform the access, then copy the hardware status
 * -- and, for reads, the data -- back out to the user buffer.
 */
647 int rsxx_reg_access(struct rsxx_cardinfo *card,
648 struct rsxx_reg_access __user *ucmd,
651 struct rsxx_reg_access cmd;
654 st = copy_from_user(&cmd, ucmd, sizeof(cmd));
/* Reject oversized requests before touching hardware. */
658 if (cmd.cnt > RSXX_MAX_REG_CNT)
661 st = issue_reg_cmd(card, &cmd, read);
665 st = put_user(cmd.stat, &ucmd->stat);
670 st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
678 /*------------ Initialization & Setup --------------*/
/*
 * Per-card initialization of the CREG control state: completion work
 * item, reset mutex, command queue, queue lock, and the command
 * timeout timer (creg_cmd_timed_out receives the card pointer cast to
 * unsigned long, per the pre-timer_setup() API).
 */
679 int rsxx_creg_setup(struct rsxx_cardinfo *card)
681 card->creg_ctrl.active_cmd = NULL;
683 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
684 mutex_init(&card->creg_ctrl.reset_lock);
685 INIT_LIST_HEAD(&card->creg_ctrl.queue);
686 spin_lock_init(&card->creg_ctrl.lock);
687 setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
688 (unsigned long) card);
/*
 * Per-card teardown: cancel every queued CREG command and the active
 * one with -ECANCELED (mirroring creg_reset), stop the timeout timer,
 * then flush the completion work item so nothing references the card
 * after return.
 */
693 void rsxx_creg_destroy(struct rsxx_cardinfo *card)
695 struct creg_cmd *cmd;
696 struct creg_cmd *tmp;
699 /* Cancel outstanding commands */
700 spin_lock_bh(&card->creg_ctrl.lock);
701 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
702 list_del(&cmd->list);
704 cmd->cb(card, cmd, -ECANCELED);
705 kmem_cache_free(creg_cmd_pool, cmd);
710 dev_info(CARD_TO_DEV(card),
711 "Canceled %d queue creg commands\n", cnt);
/* Retire the in-flight command, if any, and its timer. */
713 cmd = card->creg_ctrl.active_cmd;
714 card->creg_ctrl.active_cmd = NULL;
716 if (timer_pending(&card->creg_ctrl.cmd_timer))
717 del_timer_sync(&card->creg_ctrl.cmd_timer);
720 cmd->cb(card, cmd, -ECANCELED);
721 dev_info(CARD_TO_DEV(card),
722 "Canceled active creg command\n");
723 kmem_cache_free(creg_cmd_pool, cmd);
725 spin_unlock_bh(&card->creg_ctrl.lock);
727 cancel_work_sync(&card->creg_ctrl.done_work);
/*
 * Module-level init: create the slab cache for struct creg_cmd.
 * NOTE(review): KMEM_CACHE can return NULL -- presumably followed by a
 * NULL check returning -ENOMEM; verify against the full source.
 */
731 int rsxx_creg_init(void)
733 creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
/* Module-level teardown: destroy the creg_cmd slab cache. */
740 void rsxx_creg_cleanup(void)
742 kmem_cache_destroy(creg_cmd_pool);