/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

struct ehca_module;
struct ehca_qp;
struct ehca_cq;
struct ehca_eq;
struct ehca_mr;
struct ehca_mw;
struct ehca_pd;
struct ehca_av;

#include <linux/wait.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"

#define EHCA_EQE_CACHE_SIZE 20

struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;
	struct ehca_cq *cq;
};

struct ehca_eq {
	u32 length;
	struct ipz_queue ipz_queue;
	struct ipz_eq_handle ipz_eq_handle;
	struct work_struct work;
	struct h_galpas galpas;
	int is_initialized;
	struct ehca_pfeq pf;
	spinlock_t spinlock;
	struct tasklet_struct interrupt_task;
	u32 ist;
	spinlock_t irq_spinlock;
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};

struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};

struct ehca_sport {
	struct ib_cq *ibcq_aqp1;
	struct ib_qp *ibqp_sqp[2];
	/* lock to serialize modify_qp() calls for sqp in normal
	 * and irq path (when event PORT_ACTIVE is received the first time)
	 */
	spinlock_t mod_sqp_lock;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;
};

#define HCA_CAP_MR_PGSIZE_4K  0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M  0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000

struct ehca_shca {
	struct ib_device ib_device;
	struct of_device *ofdev;
	u8 num_ports;
	int hw_level;
	struct list_head shca_list;
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];
	struct ehca_eq eq;
	struct ehca_eq neq;
	struct ehca_mr *maxmr;
	struct ehca_pd *pd;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	u64 hca_cap;
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
	int max_mtu;
};
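
/*
 * Illustrative sketch only (not part of the original driver; the helper name
 * is hypothetical): hca_cap_mr_pgsize holds the HCA_CAP_MR_PGSIZE_* capability
 * bits defined above, so a caller could pick the largest MR page size the HCA
 * supports like this, falling back to 4K.
 */
static inline u64 ehca_example_max_mr_pgsize(struct ehca_shca *shca)
{
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
		return 16 * 1024 * 1024;
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_1M)
		return 1024 * 1024;
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_64K)
		return 64 * 1024;
	return 4 * 1024;
}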

struct ehca_pd {
	struct ib_pd ib_pd;
	struct ipz_pd fw_pd;
	u32 ownpid;
	/* small queue mgmt */
	struct mutex lock;
	struct list_head free[2];
	struct list_head full[2];
};

enum ehca_ext_qp_type {
	EQPT_NORMAL    = 0,
	EQPT_LLQP      = 1,
	EQPT_SRQBASE   = 2,
	EQPT_SRQ       = 3,
};

/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
	int mask;
	struct ib_qp_attr attr;
};

#define EHCA_MOD_QP_PARM_MAX 4
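
/*
 * Hypothetical sketch (not part of the original driver): while a GSI/SMI QP
 * is not yet operational, a modify_qp() request could be cached in the
 * mod_qp_parm array like this and replayed once PORT_ACTIVE arrives.
 */
static inline int ehca_example_cache_mod_qp(struct ehca_mod_qp_parm *cache,
					    int *idx, int mask,
					    const struct ib_qp_attr *attr)
{
	if (*idx >= EHCA_MOD_QP_PARM_MAX)
		return -ENOMEM;	/* cache full, caller must handle this */
	cache[*idx].mask = mask;
	cache[*idx].attr = *attr;
	(*idx)++;
	return 0;
}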

struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	u32 qp_type;
	enum ehca_ext_qp_type ext_type;
	struct ipz_queue ipz_squeue;
	struct ipz_queue ipz_rqueue;
	struct h_galpas galpas;
	u32 qkey;
	u32 real_qp_num;
	u32 token;
	spinlock_t spinlock_s;
	spinlock_t spinlock_r;
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;
	struct ehca_pfqp pf;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* array to cache modify_qp()'s parms for GSI/SMI qp */
	struct ehca_mod_qp_parm *mod_qp_parm;
	int mod_qp_parm_idx;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
	/* unsolicited ack circumvention */
	int unsol_ack_circ;
	int mtu_shift;
	u32 message_count;
	u32 packet_count;
};

#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
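
/*
 * Example use of the helpers above (hypothetical function, not from the
 * original driver): an SRQ (EQPT_SRQ) owns no send queue and a QP attached
 * to an SRQ (EQPT_SRQBASE) owns no receive queue of its own, so per-queue
 * setup is guarded accordingly.
 */
static inline void ehca_example_init_queue_locks(struct ehca_qp *qp)
{
	if (HAS_SQ(qp))
		spin_lock_init(&qp->spinlock_s);
	if (HAS_RQ(qp))
		spin_lock_init(&qp->spinlock_r);
}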

/* must be power of 2 */
#define QP_HASHTAB_LEN 8
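
/*
 * Hypothetical helper illustrating why QP_HASHTAB_LEN must be a power of 2:
 * a QP number can then be hashed into qp_hashtab below with a simple mask
 * instead of a modulo.
 */
static inline unsigned int ehca_example_qp_hashtab_idx(u32 qp_num)
{
	return qp_num & (QP_HASHTAB_LEN - 1);
}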

struct ehca_cq {
	struct ib_cq ib_cq;
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	spinlock_t spinlock;
	u32 cq_number;
	u32 token;
	u32 nr_of_entries;
	struct ipz_cq_handle ipz_cq_handle;
	struct ehca_pfcq pf;
	spinlock_t cb_lock;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
	atomic_t nr_events; /* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	u32 ownpid;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
};

enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR = 0x80000000,	 /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR                           */
};

struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	} ib;
	struct ib_umem *umem;
	spinlock_t mrlock;

	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for */
				/* usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};
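
/*
 * Illustrative sketch (hypothetical helper): because ib_mr/ib_fmr sit first
 * in struct ehca_mr, the verbs layer's pointer can be mapped back to the
 * driver structure with container_of().
 */
static inline struct ehca_mr *ehca_example_mr_from_ib(struct ib_mr *mr)
{
	return container_of(mr, struct ehca_mr, ib.ib_mr);
}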

struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	spinlock_t mwlock;

	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};

enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS   = 1,  /* type of ehca_reg_phys_mr,
				  * ehca_rereg_phys_mr,
				  * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER   = 2,  /* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR    = 3   /* type of ehca_map_phys_fmr */
};

struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;
	u64 num_kpages;
	u64 kpage_cnt;
	u64 hwpage_size;     /* hw page size used for this MR */
	u64 num_hwpages;     /* number of hw pages */
	u64 hwpage_cnt;      /* counter for hw pages */
	u64 next_hwpage;     /* next hw page in buffer/chunk/listelem */

	union {
		struct { /* type EHCA_MR_PGI_PHYS section */
			int num_phys_buf;
			struct ib_phys_buf *phys_buf_array;
			u64 next_buf;
		} phy;
		struct { /* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
			u64 next_nmap;
		} usr;
		struct { /* type EHCA_MR_PGI_FMR section */
			u64 fmr_pgsize;
			u64 *page_list;
			u64 next_listelem;
		} fmr;
	} u;
};

/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 lkey;
	u32 rkey;
	u64 len;
	u64 vaddr;
	u32 acl;
};

/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 rkey;
};

struct ehca_av {
	struct ib_ah ib_ah;
	struct ehca_ud_av av;
};

struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};

int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);

extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;

extern int ehca_static_rate;
extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
extern int ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;

struct ipzu_queue_resp {
	u32 qe_size;      /* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length; /* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;
	u32 offset; /* save offset within a page for small_qp */
};

struct ehca_create_cq_resp {
	u32 cq_number;
	u32 token;
	struct ipzu_queue_resp ipz_queue;
	u32 fw_handle_ofs;
	u32 dummy;
};

struct ehca_create_qp_resp {
	u32 qp_num;
	u32 token;
	u32 qp_type;
	u32 ext_type;
	u32 qkey;
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	u32 fw_handle_ofs;
	u32 dummy;
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};

struct ehca_alloc_cq_parms {
	u32 nr_cqe;
	u32 act_nr_of_entries;
	u32 act_pages;
	struct ipz_eq_handle eq_handle;
};

enum ehca_service_type {
	ST_RC  = 0,
	ST_UC  = 1,
	ST_RD  = 2,
	ST_UD  = 3,
};

enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,
};

struct ehca_alloc_queue_parms {
	/* input parameters */
	int max_wr;
	int max_sge;
	int page_size;
	int is_small;

	/* output parameters */
	u16 act_nr_wqes;
	u8  act_nr_sges;
	u32 queue_size; /* bytes for small queues, pages otherwise */
};

struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;
	struct ehca_alloc_queue_parms rqueue;

	/* input parameters */
	enum ehca_service_type servicetype;
	int qp_storage;
	int sigtype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;
	int ud_av_l_key_ctl;

	u32 token;
	struct ipz_eq_handle eq_handle;
	struct ipz_pd pd;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;

	u32 srq_qpn, srq_token, srq_limit;

	/* output parameters */
	u32 real_qp_num;
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};

int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);

#endif