/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/exportfs.h>
#include <linux/slab.h>

#include "exofs.h"

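/* Extra-verbose debug messages are compiled out of this file */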
#define EXOFS_DBGMSG2(M...) do {} while (0)

/******************************************************************************
 * MOUNT OPTIONS
 *****************************************************************************/

/*
 * struct to hold what we get from mount options
 */
struct exofs_mountopt {
	bool is_osdname;
	const char *dev_name;
	uint64_t pid;
	int timeout;
};

/*
 * exofs-specific mount-time options.
 */
enum { Opt_name, Opt_pid, Opt_to, Opt_err };

/*
 * Our mount-time options.  These should ideally be 64-bit unsigned, but the
 * kernel's parsing functions do not currently support that.  32-bit should be
 * sufficient for most applications now.
 */
static match_table_t tokens = {
	{Opt_name, "osdname=%s"},
	{Opt_pid, "pid=%u"},
	{Opt_to, "to=%u"},
	{Opt_err, NULL}
};

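/*
 * For illustration only (device path and mount point below are hypothetical),
 * a mount invocation exercising these options could look like:
 *
 *	mount -t exofs -o pid=0x10000,to=120 /dev/osd0 /mnt/exofs
 *
 * "pid" selects the OSD partition to mount (mandatory, must be at least
 * EXOFS_MIN_PID), "to" is the OSD request timeout in seconds, and
 * "osdname=<name>" may be given instead of a device path to select the
 * target OSD by name.
 */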
/*
 * The main option parsing method.  Also makes sure that all of the mandatory
 * mount options were set.
 */
static int parse_options(char *options, struct exofs_mountopt *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	bool s_pid = false;

	EXOFS_DBGMSG("parse_options %s\n", options);
	/* defaults */
	memset(opts, 0, sizeof(*opts));
	opts->timeout = BLK_DEFAULT_SG_TIMEOUT;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		char str[32];

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_name:
			opts->dev_name = match_strdup(&args[0]);
			if (unlikely(!opts->dev_name)) {
				EXOFS_ERR("Error allocating dev_name");
				return -ENOMEM;
			}
			opts->is_osdname = true;
			break;
		case Opt_pid:
			if (0 == match_strlcpy(str, &args[0], sizeof(str)))
				return -EINVAL;
			opts->pid = simple_strtoull(str, NULL, 0);
			if (opts->pid < EXOFS_MIN_PID) {
				EXOFS_ERR("Partition ID must be >= %u",
					  EXOFS_MIN_PID);
				return -EINVAL;
			}
			s_pid = true;
			break;
		case Opt_to:
			if (match_int(&args[0], &option))
				return -EINVAL;
			if (option <= 0) {
				EXOFS_ERR("Timeout must be > 0");
				return -EINVAL;
			}
			opts->timeout = option * HZ;
			break;
		}
	}

	if (!s_pid) {
		EXOFS_ERR("Need to specify the following options:\n");
		EXOFS_ERR("    -o pid=pid_no_to_use\n");
		return -EINVAL;
	}

	return 0;
}

/******************************************************************************
 * INODE CACHE
 *****************************************************************************/

/*
 * Our inode cache.  Isn't it pretty?
 */
static struct kmem_cache *exofs_inode_cachep;

/*
 * Allocate an inode in the cache
 */
static struct inode *exofs_alloc_inode(struct super_block *sb)
{
	struct exofs_i_info *oi;

	oi = kmem_cache_alloc(exofs_inode_cachep, GFP_KERNEL);
	if (!oi)
		return NULL;

	oi->vfs_inode.i_version = 1;
	return &oi->vfs_inode;
}

static void exofs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
}

/*
 * Remove an inode from the cache
 */
static void exofs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, exofs_i_callback);
}

/*
 * Initialize the inode
 */
static void exofs_init_once(void *foo)
{
	struct exofs_i_info *oi = foo;

	inode_init_once(&oi->vfs_inode);
}

/*
 * Create and initialize the inode cache
 */
static int init_inodecache(void)
{
	exofs_inode_cachep = kmem_cache_create("exofs_inode_cache",
				sizeof(struct exofs_i_info), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				exofs_init_once);
	if (exofs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

/*
 * Destroy the inode cache
 */
static void destroy_inodecache(void)
{
	kmem_cache_destroy(exofs_inode_cachep);
}

/******************************************************************************
 * Some osd helpers
 *****************************************************************************/
void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
{
	osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
}

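/*
 * Synchronously read @length bytes at @offset from object @obj on device @od
 * into the kernel buffer @p, using the credential @cred.  Used for the
 * bootstrap reads (superblock and device table) done before the ORE is set up.
 */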
static int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
		    u64 offset, void *p, unsigned length)
{
	struct osd_request *or = osd_start_request(od, GFP_KERNEL);
/*	struct osd_sense_info osi = {.key = 0};*/
	int ret;

	if (unlikely(!or)) {
		EXOFS_DBGMSG("%s: osd_start_request failed.\n", __func__);
		return -ENOMEM;
	}
	ret = osd_req_read_kern(or, obj, offset, p, length);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("%s: osd_req_read_kern failed.\n", __func__);
		goto out;
	}

	ret = osd_finalize_request(or, 0, cred, NULL);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
		goto out;
	}

	ret = osd_execute_request(or);
	if (unlikely(ret))
		EXOFS_DBGMSG("osd_execute_request() => %d\n", ret);
	/* osd_req_decode_sense(or, ret); */

out:
	osd_end_request(or);
	EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
		      "length=0x%llx dev=%p ret=>%d\n",
		      _LLU(obj->id), _LLU(offset), _LLU(length), od, ret);
	return ret;
}

static const struct osd_attr g_attr_sb_stats = ATTR_DEF(
	EXOFS_APAGE_SB_DATA,
	EXOFS_ATTR_SB_STATS,
	sizeof(struct exofs_sb_stats));

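/*
 * Read the sb-stats attribute (s_nextid and s_numfiles) off the superblock
 * object.  If the attribute is absent (older filesystems), the values already
 * read from the on-disk fscb are left untouched.
 */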
static int __sbi_read_stats(struct exofs_sb_info *sbi)
{
	struct osd_attr attrs[] = {
		[0] = g_attr_sb_stats,
	};
	struct ore_io_state *ios;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("Error reading super_block stats => %d\n", ret);
		goto out;
	}

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of sb_stats failed\n", __func__);
		goto out;
	}
	if (attrs[0].len) {
		struct exofs_sb_stats *ess;

		if (unlikely(attrs[0].len != sizeof(*ess))) {
			EXOFS_ERR("%s: Wrong version of exofs_sb_stats "
				  "size(%d) != expected(%zd)\n",
				  __func__, attrs[0].len, sizeof(*ess));
			goto out;
		}

		ess = attrs[0].val_ptr;
		sbi->s_nextid = le64_to_cpu(ess->s_nextid);
		sbi->s_numfiles = le32_to_cpu(ess->s_numfiles);
	}

out:
	ore_put_io_state(ios);
	return ret;
}

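/* Completion callback for the asynchronous sb-stats write below */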
static void stats_done(struct ore_io_state *ios, void *p)
{
	ore_put_io_state(ios);
	/* Nothing else to do here; we only needed to release the io_state */
}

/* Asynchronously write the stats attribute */
int exofs_sbi_write_stats(struct exofs_sb_info *sbi)
{
	struct osd_attr attrs[] = {
		[0] = g_attr_sb_stats,
	};
	struct ore_io_state *ios;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		return ret;
	}

	sbi->s_ess.s_nextid   = cpu_to_le64(sbi->s_nextid);
	sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles);
	attrs[0].val_ptr = &sbi->s_ess;

	ios->done = stats_done;
	ios->private = sbi;
	ios->out_attr = attrs;
	ios->out_attr_len = ARRAY_SIZE(attrs);

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_write failed.\n", __func__);
		ore_put_io_state(ios);
	}

	return ret;
}

/******************************************************************************
 * SUPERBLOCK FUNCTIONS
 *****************************************************************************/
static const struct super_operations exofs_sops;
static const struct export_operations exofs_export_ops;

/*
 * Write the superblock to the OSD
 */
static int exofs_sync_fs(struct super_block *sb, int wait)
{
	struct exofs_sb_info *sbi;
	struct exofs_fscb *fscb;
	struct ore_comp one_comp;
	struct ore_components oc;
	struct ore_io_state *ios;
	int ret = -ENOMEM;

	fscb = kmalloc(sizeof(*fscb), GFP_KERNEL);
	if (unlikely(!fscb))
		return -ENOMEM;

	sbi = sb->s_fs_info;

	/* NOTE: We no longer dirty the super_block anywhere in exofs. The
	 * reason we write the fscb here on unmount is so we can stay backwards
	 * compatible with fscb->s_version == 1. (What we are not compatible
	 * with is a new-version FS that crashed and is then mounted by an old
	 * version.) Otherwise the exofs_fscb is read-only from mkfs time. All
	 * the writeable info is set in exofs_sbi_write_stats() above.
	 */

	exofs_init_comps(&oc, &one_comp, sbi, EXOFS_SUPER_ID);

	ret = ore_get_io_state(&sbi->layout, &oc, &ios);
	if (unlikely(ret))
		goto out;

	lock_super(sb);

	ios->length = offsetof(struct exofs_fscb, s_dev_table_oid);
	memset(fscb, 0, ios->length);
	fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
	fscb->s_numfiles = cpu_to_le32(sbi->s_numfiles);
	fscb->s_magic = cpu_to_le16(sb->s_magic);
	fscb->s_newfs = 0;
	fscb->s_version = EXOFS_FSCB_VER;

	ios->offset = 0;
	ios->kern_buff = fscb;

	ret = ore_write(ios);
	if (unlikely(ret))
		EXOFS_ERR("%s: ore_write failed.\n", __func__);
	else
		sb->s_dirt = 0;

	unlock_super(sb);
out:
	EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
	ore_put_io_state(ios);
	kfree(fscb);
	return ret;
}

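/*
 * Called by the VFS for a dirty superblock; delegate to exofs_sync_fs()
 * unless we are mounted read-only.
 */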
static void exofs_write_super(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		exofs_sync_fs(sb, 1);
	else
		sb->s_dirt = 0;
}

static void _exofs_print_device(const char *msg, const char *dev_path,
				struct osd_dev *od, u64 pid)
{
	const struct osd_dev_info *odi = osduld_device_info(od);

	printk(KERN_NOTICE "exofs: %s %s osd_name-%s pid-0x%llx\n",
		msg, dev_path ?: "", odi->osdname, _LLU(pid));
}

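/*
 * Drop the references we hold on the participating OSD devices and free the
 * in-memory superblock info.
 */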
static void exofs_free_sbi(struct exofs_sb_info *sbi)
{
	unsigned numdevs = sbi->oc.numdevs;

	while (numdevs) {
		unsigned i = --numdevs;
		struct osd_dev *od = ore_comp_dev(&sbi->oc, i);

		if (od) {
			ore_comp_set_dev(&sbi->oc, i, NULL);
			osduld_put_device(od);
		}
	}
	kfree(sbi->oc.ods);
	kfree(sbi);
}

/*
 * This function is called when the vfs is freeing the superblock.  We just
 * need to free our own part.
 */
static void exofs_put_super(struct super_block *sb)
{
	int num_pend;
	struct exofs_sb_info *sbi = sb->s_fs_info;

	/* make sure there are no pending commands */
	for (num_pend = atomic_read(&sbi->s_curr_pending); num_pend > 0;
	     num_pend = atomic_read(&sbi->s_curr_pending)) {
		wait_queue_head_t wq;

		printk(KERN_NOTICE "%s: !!Pending operations in flight. "
		       "This is a BUG. Please report to osd-dev@open-osd.org\n",
		       __func__);
		init_waitqueue_head(&wq);
		wait_event_timeout(wq,
				  (atomic_read(&sbi->s_curr_pending) == 0),
				  msecs_to_jiffies(100));
	}

	_exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
			    sbi->one_comp.obj.partition);

	bdi_destroy(&sbi->bdi);
	exofs_free_sbi(sbi);
	sb->s_fs_info = NULL;
}

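/*
 * Decode the on-disk data-map from the device table into the in-memory
 * ore_layout, then have the ORE verify that the layout is one we can support.
 */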
static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs,
				    struct exofs_device_table *dt)
{
	int ret;

	sbi->layout.stripe_unit =
				le64_to_cpu(dt->dt_data_map.cb_stripe_unit);
	sbi->layout.group_width =
				le32_to_cpu(dt->dt_data_map.cb_group_width);
	sbi->layout.group_depth =
				le32_to_cpu(dt->dt_data_map.cb_group_depth);
	sbi->layout.mirrors_p1  =
				le32_to_cpu(dt->dt_data_map.cb_mirror_cnt) + 1;
	sbi->layout.raid_algorithm  =
				le32_to_cpu(dt->dt_data_map.cb_raid_algorithm);

	ret = ore_verify_layout(numdevs, &sbi->layout);

	EXOFS_DBGMSG("exofs: layout: "
		"num_comps=%u stripe_unit=0x%x group_width=%u "
		"group_depth=0x%llx mirrors_p1=%u raid_algorithm=%u\n",
		numdevs,
		sbi->layout.stripe_unit,
		sbi->layout.group_width,
		_LLU(sbi->layout.group_depth),
		sbi->layout.mirrors_p1,
		sbi->layout.raid_algorithm);
	return ret;
}

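/*
 * Size the default read-ahead window: two full stripes worth of pages, but at
 * least _MIN_RA (128K) and at most what exofs_max_io_pages() allows.  For
 * example (made-up numbers, 4K pages): group_width=4 and stripe_unit=64K give
 * 64 pages per stripe, doubled to 128 pages (512K) of read-ahead.
 */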
static unsigned __ra_pages(struct ore_layout *layout)
{
	const unsigned _MIN_RA = 32; /* min 128K read-ahead */
	unsigned ra_pages = layout->group_width * layout->stripe_unit /
				PAGE_SIZE;
	unsigned max_io_pages = exofs_max_io_pages(layout, ~0);

	ra_pages *= 2; /* two stripes */
	if (ra_pages < _MIN_RA)
		ra_pages = roundup(_MIN_RA, ra_pages / 2);

	if (ra_pages > max_io_pages)
		ra_pages = max_io_pages;

	return ra_pages;
}

/* @odi is valid only as long as @fscb_dev is valid */
static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
			     struct osd_dev_info *odi)
{
	odi->systemid_len = le32_to_cpu(dt_dev->systemid_len);
	memcpy(odi->systemid, dt_dev->systemid, odi->systemid_len);

	odi->osdname_len = le32_to_cpu(dt_dev->osdname_len);
	odi->osdname = dt_dev->osdname;

	/* FIXME support long names. Will need a _put function */
	if (dt_dev->long_name_offset)
		return -EINVAL;

	/* Make sure osdname is printable!
	 * mkexofs should have left us room for a NUL terminator; otherwise
	 * the device-table is invalid.
	 */
	if (unlikely(odi->osdname_len >= sizeof(dt_dev->osdname)))
		odi->osdname_len = sizeof(dt_dev->osdname) - 1;
	dt_dev->osdname[odi->osdname_len] = 0;

	/* If it's all zeros, something is wrong; we read past end-of-object */
	return !(odi->systemid_len || odi->osdname_len);
}

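/*
 * Allocate the ore_dev pointer table and the exofs_dev array in a single
 * allocation.  The pointer table is (almost) twice the number of devices so
 * that exofs_init_comps() can hand each inode a rotated-but-contiguous view
 * of the devices; see the comment in exofs_read_lookup_dev_table().
 */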
int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs,
		      struct exofs_dev **peds)
{
	struct __alloc_ore_devs_and_exofs_devs {
		/* Twice bigger table: See exofs_init_comps() and comment at
		 * exofs_read_lookup_dev_table()
		 */
		struct ore_dev *oreds[numdevs * 2 - 1];
		struct exofs_dev eds[numdevs];
	} *aoded;
	struct exofs_dev *eds;
	unsigned i;

	aoded = kzalloc(sizeof(*aoded), GFP_KERNEL);
	if (unlikely(!aoded)) {
		EXOFS_ERR("ERROR: failed allocating Device array[%d]\n",
			  numdevs);
		return -ENOMEM;
	}

	sbi->oc.ods = aoded->oreds;
	*peds = eds = aoded->eds;
	for (i = 0; i < numdevs; ++i)
		aoded->oreds[i] = &eds[i].ored;
	return 0;
}

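/*
 * Read the device-table object from @fscb_od, decode the layout it describes,
 * and look up every participating OSD so the component table is fully
 * populated.
 */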
static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
				       struct osd_dev *fscb_od,
				       unsigned table_count)
{
	struct ore_comp comp;
	struct exofs_device_table *dt;
	struct exofs_dev *eds;
	unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) +
					     sizeof(*dt);
	unsigned numdevs, i;
	int ret;

	dt = kmalloc(table_bytes, GFP_KERNEL);
	if (unlikely(!dt)) {
		EXOFS_ERR("ERROR: allocating %x bytes for device table\n",
			  table_bytes);
		return -ENOMEM;
	}

	sbi->oc.numdevs = 0;

	comp.obj.partition = sbi->one_comp.obj.partition;
	comp.obj.id = EXOFS_DEVTABLE_ID;
	exofs_make_credential(comp.cred, &comp.obj);

	ret = exofs_read_kern(fscb_od, comp.cred, &comp.obj, 0, dt,
			      table_bytes);
	if (unlikely(ret)) {
		EXOFS_ERR("ERROR: reading device table\n");
		goto out;
	}

	numdevs = le64_to_cpu(dt->dt_num_devices);
	if (unlikely(!numdevs)) {
		ret = -EINVAL;
		goto out;
	}
	WARN_ON(table_count != numdevs);

	ret = _read_and_match_data_map(sbi, numdevs, dt);
	if (unlikely(ret))
		goto out;

	ret = __alloc_dev_table(sbi, numdevs, &eds);
	if (unlikely(ret))
		goto out;
	/* exofs round-robins the device-table view according to inode
	 * number. We hold a table twice as big, so inodes can point to
	 * any device and still see a sequential view of the table
	 * starting at that device. See exofs_init_comps()
	 */
	memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
		(numdevs - 1) * sizeof(sbi->oc.ods[0]));

	for (i = 0; i < numdevs; i++) {
		struct exofs_fscb fscb;
		struct osd_dev_info odi;
		struct osd_dev *od;

		if (exofs_devs_2_odi(&dt->dt_dev_table[i], &odi)) {
			EXOFS_ERR("ERROR: Read all-zeros device entry\n");
			ret = -EINVAL;
			goto out;
		}

		printk(KERN_NOTICE "Add device[%d]: osd_name-%s\n",
		       i, odi.osdname);

		/* the exofs id is currently the table index */
		eds[i].did = i;

		/* On all devices the device table is identical. The user can
		 * specify any one of the participating devices on the command
		 * line. We always keep them in device-table order.
		 */
		if (fscb_od && osduld_device_same(fscb_od, &odi)) {
			eds[i].ored.od = fscb_od;
			++sbi->oc.numdevs;
			fscb_od = NULL;
			continue;
		}

		od = osduld_info_lookup(&odi);
		if (IS_ERR(od)) {
			ret = PTR_ERR(od);
			EXOFS_ERR("ERROR: device requested is not found "
				  "osd_name-%s =>%d\n", odi.osdname, ret);
			goto out;
		}

		eds[i].ored.od = od;
		++sbi->oc.numdevs;

		/* Read the fscb of the other devices to make sure the FS
		 * partition is there.
		 */
		ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb,
				      sizeof(fscb));
		if (unlikely(ret)) {
			EXOFS_ERR("ERROR: Malformed participating device "
				  "error reading fscb osd_name-%s\n",
				  odi.osdname);
			goto out;
		}

		/* TODO: verify other information is correct and FS-uuid
		 *	 matches. Benny what did you say about device table
		 *	 generation and old devices?
		 */
	}

out:
	kfree(dt);
	if (unlikely(fscb_od && !ret)) {
		EXOFS_ERR("ERROR: Bad device-table, container device not present\n");
		osduld_put_device(fscb_od);
		return -EINVAL;
	}
	return ret;
}

/*
 * Read the superblock from the OSD and fill in the fields
 */
static int exofs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;
	struct exofs_mountopt *opts = data;
	struct exofs_sb_info *sbi;	/*extended info                  */
	struct osd_dev *od;		/* Master device                 */
	struct exofs_fscb fscb;		/*on-disk superblock info        */
	struct ore_comp comp;
	unsigned table_count;
	int ret;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* use mount options to fill superblock */
	if (opts->is_osdname) {
		struct osd_dev_info odi = {.systemid_len = 0};

		odi.osdname_len = strlen(opts->dev_name);
		odi.osdname = (u8 *)opts->dev_name;
		od = osduld_info_lookup(&odi);
		kfree(opts->dev_name);
		opts->dev_name = NULL;
	} else {
		od = osduld_path_lookup(opts->dev_name);
	}
	if (IS_ERR(od)) {
		ret = -EINVAL;
		goto free_sbi;
	}

	/* Default layout in case we do not have a device-table */
	sbi->layout.stripe_unit = PAGE_SIZE;
	sbi->layout.mirrors_p1 = 1;
	sbi->layout.group_width = 1;
	sbi->layout.group_depth = -1;
	sbi->layout.group_count = 1;
	sbi->s_timeout = opts->timeout;

	sbi->one_comp.obj.partition = opts->pid;
	sbi->one_comp.obj.id = 0;
	exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
	sbi->oc.numdevs = 1;
	sbi->oc.single_comp = EC_SINGLE_COMP;
	sbi->oc.comps = &sbi->one_comp;

	/* fill in some other data by hand */
	memset(sb->s_id, 0, sizeof(sb->s_id));
	strcpy(sb->s_id, "exofs");
	sb->s_blocksize = EXOFS_BLKSIZE;
	sb->s_blocksize_bits = EXOFS_BLKSHIFT;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	atomic_set(&sbi->s_curr_pending, 0);
	sb->s_bdev = NULL;
	sb->s_dev = 0;

	comp.obj.partition = sbi->one_comp.obj.partition;
	comp.obj.id = EXOFS_SUPER_ID;
	exofs_make_credential(comp.cred, &comp.obj);

	ret = exofs_read_kern(od, comp.cred, &comp.obj, 0, &fscb, sizeof(fscb));
	if (unlikely(ret))
		goto free_sbi;

	sb->s_magic = le16_to_cpu(fscb.s_magic);
	/* NOTE: we read below to be backward compatible with old versions */
	sbi->s_nextid = le64_to_cpu(fscb.s_nextid);
	sbi->s_numfiles = le32_to_cpu(fscb.s_numfiles);

	/* make sure what we read from the object store is correct */
	if (sb->s_magic != EXOFS_SUPER_MAGIC) {
		if (!silent)
			EXOFS_ERR("ERROR: Bad magic value\n");
		ret = -EINVAL;
		goto free_sbi;
	}
	if (le32_to_cpu(fscb.s_version) > EXOFS_FSCB_VER) {
		EXOFS_ERR("ERROR: Bad FSCB version expected-%d got-%d\n",
			  EXOFS_FSCB_VER, le32_to_cpu(fscb.s_version));
		ret = -EINVAL;
		goto free_sbi;
	}

	/* start generation numbers from a random point */
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	table_count = le64_to_cpu(fscb.s_dev_table_count);
	if (table_count) {
		ret = exofs_read_lookup_dev_table(sbi, od, table_count);
		if (unlikely(ret))
			goto free_sbi;
	} else {
		struct exofs_dev *eds;

		ret = __alloc_dev_table(sbi, 1, &eds);
		if (unlikely(ret))
			goto free_sbi;

		ore_comp_set_dev(&sbi->oc, 0, od);
	}

	__sbi_read_stats(sbi);

	/* set up operation vectors */
	sbi->bdi.ra_pages = __ra_pages(&sbi->layout);
	sb->s_bdi = &sbi->bdi;
	sb->s_fs_info = sbi;
	sb->s_op = &exofs_sops;
	sb->s_export_op = &exofs_export_ops;
	root = exofs_iget(sb, EXOFS_ROOT_ID - EXOFS_OBJ_OFF);
	if (IS_ERR(root)) {
		EXOFS_ERR("ERROR: exofs_iget failed\n");
		ret = PTR_ERR(root);
		goto free_sbi;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		EXOFS_ERR("ERROR: get root inode failed\n");
		ret = -ENOMEM;
		goto free_sbi;
	}

	if (!S_ISDIR(root->i_mode)) {
		dput(sb->s_root);
		sb->s_root = NULL;
		EXOFS_ERR("ERROR: corrupt root inode (mode = %hd)\n",
		       root->i_mode);
		ret = -EINVAL;
		goto free_sbi;
	}

	ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
	if (ret) {
		EXOFS_DBGMSG("Failed to bdi_setup_and_register\n");
		dput(sb->s_root);
		sb->s_root = NULL;
		goto free_sbi;
	}

	_exofs_print_device("Mounting", opts->dev_name,
			    ore_comp_dev(&sbi->oc, 0),
			    sbi->one_comp.obj.partition);
	return 0;

free_sbi:
	EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
		  opts->dev_name, sbi->one_comp.obj.partition, ret);
	exofs_free_sbi(sbi);
	return ret;
}

/*
 * Set up the superblock (calls exofs_fill_super eventually)
 */
static struct dentry *exofs_mount(struct file_system_type *type,
			  int flags, const char *dev_name,
			  void *data)
{
	struct exofs_mountopt opts;
	int ret;

	ret = parse_options(data, &opts);
	if (ret)
		return ERR_PTR(ret);

	if (!opts.dev_name)
		opts.dev_name = dev_name;
	return mount_nodev(type, flags, &opts, exofs_fill_super);
}

/*
 * Return information about the file system state in the buffer.  This is used
 * by the 'df' command, for example.
 */
static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attrs[] = {
		ATTR_DEF(OSD_APAGE_PARTITION_QUOTAS,
			OSD_ATTR_PQ_CAPACITY_QUOTA, sizeof(__be64)),
		ATTR_DEF(OSD_APAGE_PARTITION_INFORMATION,
			OSD_ATTR_PI_USED_CAPACITY, sizeof(__be64)),
	};
	uint64_t capacity = ULLONG_MAX;
	uint64_t used = ULLONG_MAX;
	int ret;

	ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios);
	if (ret) {
		EXOFS_DBGMSG("ore_get_io_state failed.\n");
		return ret;
	}

	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = ore_read(ios);
	if (unlikely(ret))
		goto out;

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (likely(!ret)) {
		capacity = get_unaligned_be64(attrs[0].val_ptr);
		if (unlikely(!capacity))
			capacity = ULLONG_MAX;
	} else
		EXOFS_DBGMSG("exofs_statfs: get capacity failed.\n");

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (likely(!ret))
		used = get_unaligned_be64(attrs[1].val_ptr);
	else
		EXOFS_DBGMSG("exofs_statfs: get used-space failed.\n");

	/* fill in the stats buffer */
	buf->f_type = EXOFS_SUPER_MAGIC;
	buf->f_bsize = EXOFS_BLKSIZE;
	buf->f_blocks = capacity >> 9;
	buf->f_bfree = (capacity - used) >> 9;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = sbi->s_numfiles;
	buf->f_ffree = EXOFS_MAX_ID - sbi->s_numfiles;
	buf->f_namelen = EXOFS_NAME_LEN;

out:
	ore_put_io_state(ios);
	return ret;
}

static const struct super_operations exofs_sops = {
	.alloc_inode    = exofs_alloc_inode,
	.destroy_inode  = exofs_destroy_inode,
	.write_inode    = exofs_write_inode,
	.evict_inode    = exofs_evict_inode,
	.put_super      = exofs_put_super,
	.write_super    = exofs_write_super,
	.sync_fs	= exofs_sync_fs,
	.statfs         = exofs_statfs,
};

/******************************************************************************
 * EXPORT OPERATIONS
 *****************************************************************************/

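/* Look up the parent directory of @child; used by the NFS export code */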
static struct dentry *exofs_get_parent(struct dentry *child)
{
	unsigned long ino = exofs_parent_ino(child);

	if (!ino)
		return ERR_PTR(-ESTALE);

	return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino));
}

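/*
 * Turn an (ino, generation) pair from a file handle back into an inode,
 * verifying the generation when one is given.
 */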
static struct inode *exofs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	inode = exofs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *exofs_fh_to_dentry(struct super_block *sb,
				struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    exofs_nfs_get_inode);
}

static struct dentry *exofs_fh_to_parent(struct super_block *sb,
				struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    exofs_nfs_get_inode);
}

static const struct export_operations exofs_export_ops = {
	.fh_to_dentry = exofs_fh_to_dentry,
	.fh_to_parent = exofs_fh_to_parent,
	.get_parent = exofs_get_parent,
};

/******************************************************************************
 * INSMOD/RMMOD
 *****************************************************************************/

/*
 * struct that describes this file system
 */
static struct file_system_type exofs_type = {
	.owner          = THIS_MODULE,
	.name           = "exofs",
	.mount          = exofs_mount,
	.kill_sb        = generic_shutdown_super,
};

static int __init init_exofs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		goto out;

	err = register_filesystem(&exofs_type);
	if (err)
		goto out_d;

	return 0;
out_d:
	destroy_inodecache();
out:
	return err;
}

static void __exit exit_exofs(void)
{
	unregister_filesystem(&exofs_type);
	destroy_inodecache();
}

MODULE_AUTHOR("Avishay Traeger <avishay@gmail.com>");
MODULE_DESCRIPTION("exofs");
MODULE_LICENSE("GPL");

module_init(init_exofs)
module_exit(exit_exofs)