Fix conflicts created by import of latest lvm2tools LVM2-2-02-56.

This commit is contained in:
haad 2009-12-02 00:58:02 +00:00
parent 7d576ad983
commit d685da8d62
28 changed files with 9472 additions and 5518 deletions

View File

@ -1,77 +0,0 @@
###############################################################################
###############################################################################
##
## Copyright (C) 2009 Red Hat, Inc. All rights reserved.
##
## This copyrighted material is made available to anyone wishing to use,
## modify, copy, or redistribute it subject to the terms and conditions
## of the GNU General Public License v.2.
##
###############################################################################
###############################################################################

# Sources of the cluster log daemon (clogd).
SOURCES = clogd.c cluster.c functions.c link_mon.c local.c logging.c

# Probe the build host for required development headers; when one is
# missing, TARGET becomes a phony diagnostic target (defined below)
# instead of 'clogd'.
TARGET = $(shell if [ ! -e /usr/include/linux/dm-clog-tfr.h ]; then \
		echo 'no_clogd_kernel_headers'; \
	elif [ ! -e /usr/include/linux/ext2_fs.h ]; then \
		echo 'no_e2fsprogs_devel'; \
	elif [ ! -e /usr/include/openais/saCkpt.h ]; then \
		echo 'no_openais_devel'; \
	else \
		echo 'clogd'; \
	fi)

# Optional compile-time switches, e.g.: make DEBUG=1 CKPT=1
ifneq ($(DEBUG), )
CFLAGS += -DDEBUG
endif

ifneq ($(MEMB), )
CFLAGS += -DMEMB
endif

ifneq ($(CKPT), )
CFLAGS += -DCKPT
endif

ifneq ($(RESEND), )
CFLAGS += -DRESEND
endif

CFLAGS += -g

# Prefer the 64-bit openais library directories when present.
LDFLAGS += $(shell if [ -e /usr/lib64/openais ]; then \
		echo '-L/usr/lib64/openais -L/usr/lib64'; \
	else \
		echo '-L/usr/lib/openais -L/usr/lib'; \
	fi)
LDFLAGS += -lcpg -lSaCkpt -lext2fs

all: ${TARGET}

clogd: ${SOURCES}
	${CC} ${CFLAGS} -o $@ $^ ${LDFLAGS}

# Diagnostic targets selected by the TARGET probe above.
no_clogd_kernel_headers:
	echo "Unable to find clogd kernel headers"
	exit 1

no_e2fsprogs_devel:
	echo "Unable to find ext2fs kernel headers."
	echo "Install 'e2fsprogs-devel'?"
	exit 1

no_openais_devel:
	echo "Unable to find openAIS headers."
	echo "http://sources.redhat.com/cluster/wiki/"
	exit 1

install: clogd
	install -d /usr/sbin
	install clogd /usr/sbin

uninstall:
	rm /usr/sbin/clogd

clean:
	rm -f *.o clogd *~

View File

@ -1,285 +0,0 @@
/* $NetBSD: clogd.c,v 1.1.1.1 2009/02/18 11:16:31 haad Exp $ */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sched.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/dm-clog-tfr.h>
#include <linux/dm-ioctl.h>
#include "functions.h"
#include "local.h"
#include "cluster.h"
#include "common.h"
#include "logging.h"
#include "link_mon.h"
static int exit_now = 0;
static sigset_t signal_mask;
static int signal_received;
static void process_signals(void);
static void daemonize(void);
static void init_all(void);
static void cleanup_all(void);
static void set_priority(void);
/*
 * main
 *
 * Daemonize, bring up the kernel (netlink) and cluster interfaces,
 * then service descriptor events until a signal requests shutdown.
 */
int main(int argc, char *argv[])
{
	daemonize();
	init_all();

	/* The forked parent is still waiting; SIGTERM tells it we are up. */
	kill(getppid(), SIGTERM);

	/* set_priority(); -- let's try to do w/o this */

	LOG_PRINT("Starting clogd:");
	LOG_PRINT(" Built: "__DATE__" "__TIME__"\n");
	LOG_DBG(" Compiled with debugging.");

	for (;;) {
		if (exit_now)
			break;
		links_monitor();
		links_issue_callbacks();
		process_signals();
	}

	exit(EXIT_SUCCESS);
}
/*
 * parent_exit_handler: flag the parent process to exit
 * @sig: the signal (value unused)
 *
 * Installed by daemonize() in the parent; the child sends SIGTERM from
 * main() once initialization has succeeded.
 */
static void parent_exit_handler(int sig)
{
	exit_now = 1;
}
/*
 * create_lockfile - create and lock the daemon's pid/lock file
 * @lockfile: location of lock file
 *
 * Opens (creating if necessary) @lockfile, takes an exclusive fcntl
 * write lock, truncates the file and writes our pid into it.  The
 * descriptor is deliberately left open: the kernel releases the lock
 * when it is closed, so it must live as long as the daemon.
 *
 * Returns: 0 on success, -errno otherwise
 */
static int create_lockfile(char *lockfile)
{
	int fd;
	int saved_errno;
	ssize_t written;
	size_t len;
	struct flock lock;
	char buffer[50];

	if ((fd = open(lockfile, O_CREAT | O_WRONLY,
		       (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) < 0)
		return -errno;

	lock.l_type = F_WRLCK;
	lock.l_start = 0;
	lock.l_whence = SEEK_SET;
	lock.l_len = 0;

	/* Failure here usually means another instance holds the lock. */
	if (fcntl(fd, F_SETLK, &lock) < 0) {
		saved_errno = errno;	/* close() may clobber errno */
		close(fd);
		return -saved_errno;
	}

	if (ftruncate(fd, 0) < 0) {
		saved_errno = errno;
		close(fd);
		return -saved_errno;
	}

	snprintf(buffer, sizeof(buffer), "%d\n", getpid());
	len = strlen(buffer);

	/*
	 * The original compared write()'s ssize_t result directly against
	 * strlen()'s size_t, so a -1 error return wrapped to SIZE_MAX and
	 * the failure went undetected.  Check error and short write
	 * explicitly.
	 */
	written = write(fd, buffer, len);
	if (written < 0 || (size_t)written < len) {
		saved_errno = (written < 0) ? errno : EIO;
		close(fd);
		unlink(lockfile);
		return -saved_errno;
	}

	return 0;
}
/*
 * sig_handler - async signal handler
 * @sig: the delivered signal
 *
 * Records the signal for later servicing by process_signals() in the
 * main loop; nothing signal-unsafe is done here.
 */
static void sig_handler(int sig)
{
	sigaddset(&signal_mask, sig);
	signal_received++;
}
/*
 * process_signal - service one deferred signal
 * @sig: signal number pulled out of signal_mask
 *
 * Termination signals only lead to exit when log_status() reports no
 * remaining cluster logs; USR1/USR2 dump debugging state instead.
 */
static void process_signal(int sig)
{
	int busy = 0;

	switch (sig) {
	case SIGINT:
	case SIGQUIT:
	case SIGTERM:
	case SIGHUP:
		busy += log_status();
		break;
	case SIGUSR1:
	case SIGUSR2:
		log_debug();
		/*local_debug();*/
		cluster_debug();
		return;
	default:
		LOG_PRINT("Unknown signal received... ignoring");
		return;
	}

	if (busy) {
		LOG_ERROR("Cluster logs exist. Refusing to exit.");
		return;
	}

	LOG_DBG("No current cluster logs... safe to exit.");
	cleanup_all();
	exit(EXIT_SUCCESS);
}
/*
 * process_signals
 *
 * Drain every signal recorded by sig_handler() since the last pass and
 * hand each one to process_signal().
 */
static void process_signals(void)
{
	int sig;

	if (!signal_received)
		return;

	signal_received = 0;
	for (sig = 1; sig < _NSIG; sig++) {
		if (!sigismember(&signal_mask, sig))
			continue;
		sigdelset(&signal_mask, sig);
		process_signal(sig);
	}
}
/*
 * daemonize
 *
 * Performs the steps necessary to become a daemon.
 */
static void daemonize(void)
{
	int pid;
	int status;

	/* The child signals readiness by sending us SIGTERM (see main()). */
	signal(SIGTERM, &parent_exit_handler);

	pid = fork();
	if (pid < 0) {
		LOG_ERROR("Unable to fork()");
		exit(EXIT_FAILURE);
	}

	if (pid) {
		/* Parent waits here for child to get going */
		while (!waitpid(pid, &status, WNOHANG) && !exit_now);

		/* exit_now set => the child came up and signalled success. */
		if (exit_now)
			exit(EXIT_SUCCESS);

		/* Otherwise the child died; translate its exit code. */
		switch (WEXITSTATUS(status)) {
		case EXIT_LOCKFILE:
			LOG_ERROR("Failed to create lockfile");
			LOG_ERROR("Process already running?");
			break;
		case EXIT_KERNEL_TFR_SOCKET:
			LOG_ERROR("Unable to create netlink socket");
			break;
		case EXIT_KERNEL_TFR_BIND:
			LOG_ERROR("Unable to bind to netlink socket");
			break;
		case EXIT_KERNEL_TFR_SETSOCKOPT:
			LOG_ERROR("Unable to setsockopt on netlink socket");
			break;
		case EXIT_CLUSTER_CKPT_INIT:
			LOG_ERROR("Unable to initialize checkpoint service");
			LOG_ERROR("Has the cluster infrastructure been started?");
			break;
		case EXIT_FAILURE:
			LOG_ERROR("Failed to start: Generic error");
			break;
		default:
			LOG_ERROR("Failed to start: Unknown error");
			break;
		}
		exit(EXIT_FAILURE);
	}

	/* Child: detach from the controlling terminal and filesystem. */
	setsid();
	chdir("/");
	umask(0);

	/* Re-point the three standard descriptors at /dev/null. */
	close(0); close(1); close(2);
	open("/dev/null", O_RDONLY); /* reopen stdin */
	open("/dev/null", O_WRONLY); /* reopen stdout */
	open("/dev/null", O_WRONLY); /* reopen stderr */

	LOG_OPEN("clogd", LOG_PID, LOG_DAEMON);

	if (create_lockfile("/var/run/clogd.pid"))
		exit(EXIT_LOCKFILE);

	/* Defer signal handling to the main loop via sig_handler(). */
	signal(SIGINT, &sig_handler);
	signal(SIGQUIT, &sig_handler);
	signal(SIGTERM, &sig_handler);
	signal(SIGHUP, &sig_handler);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGUSR1, &sig_handler);
	signal(SIGUSR2, &sig_handler);
	sigemptyset(&signal_mask);
	signal_received = 0;
}
/*
 * init_all
 *
 * Bring up the kernel (netlink) interface, then the cluster interface.
 * Exits the process with the failing module's error code on failure.
 */
static void init_all(void)
{
	int r = init_local();

	if (!r)
		r = init_cluster();
	if (r)
		exit(r);
}
/*
 * cleanup_all
 *
 * Tear down what init_all() brought up, local side first.
 */
static void cleanup_all(void)
{
	cleanup_local();
	cleanup_cluster();
}
/*
 * set_priority
 *
 * Best-effort attempt to move the daemon into the real-time SCHED_RR
 * class at maximum priority; failures are only logged.
 */
static void set_priority(void)
{
	int max_prio = sched_get_priority_max(SCHED_RR);

	if (max_prio != -1) {
		struct sched_param sp;

		sp.sched_priority = max_prio;
		if (sched_setscheduler(0, SCHED_RR, &sp) != -1)
			return;
	}

	LOG_ERROR("Unable to set SCHED_RR priority.");
}

File diff suppressed because it is too large Load Diff

View File

@ -1,15 +0,0 @@
/* $NetBSD: cluster.h,v 1.1.1.1 2009/02/18 11:16:31 haad Exp $ */

/*
 * Cluster-side interface of the cluster log daemon.
 */
#ifndef __CLUSTER_LOG_CLUSTER_DOT_H__
#define __CLUSTER_LOG_CLUSTER_DOT_H__

/* Bring up / tear down the cluster interface (see init_all/cleanup_all). */
int init_cluster(void);
void cleanup_cluster(void);

/* Dump cluster state to the log (SIGUSR1/SIGUSR2 path). */
void cluster_debug(void);

/* Join/leave the CPG named by @str. */
int create_cluster_cpg(char *str);
int destroy_cluster_cpg(char *str);

/* Forward a log request to the cluster. */
int cluster_send(struct clog_tfr *tfr);

#endif /* __CLUSTER_LOG_CLUSTER_DOT_H__ */

View File

@ -1,42 +0,0 @@
/* $NetBSD: common.h,v 1.1.1.1 2009/02/18 11:16:31 haad Exp $ */

/*
 * Process exit codes shared between the daemon modules; daemonize()
 * translates each of these into a log message for the waiting parent.
 */
#ifndef __CLUSTER_LOG_COMMON_DOT_H__
#define __CLUSTER_LOG_COMMON_DOT_H__

/*
#define EXIT_SUCCESS 0
#define EXIT_FAILURE 1
*/
#define EXIT_LOCKFILE 2

#define EXIT_KERNEL_TFR_SOCKET 3 /* Failed netlink socket create */
#define EXIT_KERNEL_TFR_BIND 4
#define EXIT_KERNEL_TFR_SETSOCKOPT 5

#define EXIT_CLUSTER_CKPT_INIT 6 /* Failed to init checkpoint */

#define EXIT_QUEUE_NOMEM 7

/* Located in dm-clog-tfr.h
#define RQ_TYPE(x) \
((x) == DM_CLOG_CTR) ? "DM_CLOG_CTR" : \
((x) == DM_CLOG_DTR) ? "DM_CLOG_DTR" : \
((x) == DM_CLOG_PRESUSPEND) ? "DM_CLOG_PRESUSPEND" : \
((x) == DM_CLOG_POSTSUSPEND) ? "DM_CLOG_POSTSUSPEND" : \
((x) == DM_CLOG_RESUME) ? "DM_CLOG_RESUME" : \
((x) == DM_CLOG_GET_REGION_SIZE) ? "DM_CLOG_GET_REGION_SIZE" : \
((x) == DM_CLOG_IS_CLEAN) ? "DM_CLOG_IS_CLEAN" : \
((x) == DM_CLOG_IN_SYNC) ? "DM_CLOG_IN_SYNC" : \
((x) == DM_CLOG_FLUSH) ? "DM_CLOG_FLUSH" : \
((x) == DM_CLOG_MARK_REGION) ? "DM_CLOG_MARK_REGION" : \
((x) == DM_CLOG_CLEAR_REGION) ? "DM_CLOG_CLEAR_REGION" : \
((x) == DM_CLOG_GET_RESYNC_WORK) ? "DM_CLOG_GET_RESYNC_WORK" : \
((x) == DM_CLOG_SET_REGION_SYNC) ? "DM_CLOG_SET_REGION_SYNC" : \
((x) == DM_CLOG_GET_SYNC_COUNT) ? "DM_CLOG_GET_SYNC_COUNT" : \
((x) == DM_CLOG_STATUS_INFO) ? "DM_CLOG_STATUS_INFO" : \
((x) == DM_CLOG_STATUS_TABLE) ? "DM_CLOG_STATUS_TABLE" : \
NULL
*/

#endif /* __CLUSTER_LOG_COMMON_DOT_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,22 +0,0 @@
/* $NetBSD: functions.h,v 1.1.1.1 2009/02/18 11:16:32 haad Exp $ */

/*
 * Log request processing interface (implemented in functions.c).
 */
#ifndef __CLOG_FUNCTIONS_DOT_H__
#define __CLOG_FUNCTIONS_DOT_H__

#include <linux/dm-clog-tfr.h>

/* Log state flags.  NOTE(review): exact usage not visible here -- confirm
 * against functions.c. */
#define LOG_RESUMED 1
#define LOG_SUSPENDED 2

int local_resume(struct clog_tfr *tfr);
int cluster_postsuspend(char *);

/* Process one request; local.c passes server == 0 ("We do not specify
 * ourselves as server here"). */
int do_request(struct clog_tfr *tfr, int server);

/* Serialize ("push") or restore ("pull") state of the log identified by
 * @uuid.  NOTE(review): semantics of 'which' not visible here -- confirm. */
int push_state(const char *uuid, const char *which, char **buf);
int pull_state(const char *uuid, const char *which, char *buf, int size);

int log_get_state(struct clog_tfr *tfr);

/* Returns nonzero while cluster logs exist (checked before exiting). */
int log_status(void);

void log_debug(void);

#endif /* __CLOG_FUNCTIONS_DOT_H__ */

View File

@ -1,140 +0,0 @@
/* $NetBSD: link_mon.c,v 1.1.1.1 2009/02/18 11:16:32 haad Exp $ */

#include <stdlib.h>
#include <errno.h>
#include <poll.h>

#include "logging.h"

/* One registered descriptor: the fd plus the callback fired when it is
 * readable. */
struct link_callback {
	int fd;
	char *name;			/* tag for debug output; stored, not copied */
	void *data;			/* opaque argument passed to callback */
	int (*callback)(void *data);
	struct link_callback *next;	/* singly-linked registration list */
};

/* Poll set: the first used_pfds entries of pfds are live; free_pfds spare
 * slots follow them (grown on demand in links_register()). */
static int used_pfds = 0;
static int free_pfds = 0;
static struct pollfd *pfds = NULL;
static struct link_callback *callbacks = NULL;
/*
 * links_register
 * @fd: descriptor to watch for POLLIN
 * @name: human-readable tag for debug output (pointer stored, not copied)
 * @callback: invoked with @data when @fd becomes readable
 * @data: opaque argument for @callback
 *
 * Returns: 0 on success, -EINVAL on duplicate fd, -ENOMEM on allocation failure
 */
int links_register(int fd, char *name, int (*callback)(void *data), void *data)
{
	int idx;
	struct link_callback *entry;

	/* Reject descriptors we are already watching. */
	for (idx = 0; idx < used_pfds; idx++) {
		if (pfds[idx].fd == fd) {
			LOG_ERROR("links_register: Duplicate file descriptor");
			return -EINVAL;
		}
	}

	entry = malloc(sizeof(*entry));
	if (!entry)
		return -ENOMEM;
	entry->fd = fd;
	entry->name = name;
	entry->data = data;
	entry->callback = callback;

	/* Grow the pollfd array geometrically when no slot is free. */
	if (!free_pfds) {
		struct pollfd *grown;

		grown = realloc(pfds, sizeof(struct pollfd) * ((used_pfds * 2) + 1));
		if (!grown) {
			free(entry);
			return -ENOMEM;
		}
		pfds = grown;
		free_pfds = used_pfds + 1;
	}

	free_pfds--;
	pfds[used_pfds].fd = fd;
	pfds[used_pfds].events = POLLIN;
	pfds[used_pfds].revents = 0;
	used_pfds++;

	entry->next = callbacks;
	callbacks = entry;

	LOG_DBG("Adding %s/%d", entry->name, entry->fd);
	LOG_DBG(" used_pfds = %d, free_pfds = %d",
		used_pfds, free_pfds);

	return 0;
}
/*
 * links_unregister
 * @fd: descriptor to stop watching
 *
 * Removes @fd from the poll set and frees its callback record.
 * Always returns 0, even if @fd was never registered.
 */
int links_unregister(int fd)
{
	int idx;
	struct link_callback *prev = NULL;
	struct link_callback *cur;

	/* Compact the poll array: move the last live slot into the hole. */
	for (idx = 0; idx < used_pfds; idx++) {
		if (pfds[idx].fd != fd)
			continue;
		/* entire struct is copied (overwritten) */
		pfds[idx] = pfds[used_pfds - 1];
		used_pfds--;
		free_pfds++;
	}

	for (cur = callbacks; cur; prev = cur, cur = cur->next) {
		if (cur->fd != fd)
			continue;
		LOG_DBG("Freeing up %s/%d", cur->name, cur->fd);
		LOG_DBG(" used_pfds = %d, free_pfds = %d",
			used_pfds, free_pfds);
		if (prev)
			prev->next = cur->next;
		else
			callbacks = cur->next;
		free(cur);
		break;
	}

	return 0;
}
/*
 * links_monitor
 *
 * Blocks in poll(2) until at least one registered descriptor is readable.
 *
 * Returns: number of POLLIN descriptors, or poll(2)'s result on error
 */
int links_monitor(void)
{
	int idx, ready;

	for (idx = 0; idx < used_pfds; idx++)
		pfds[idx].revents = 0;

	ready = poll(pfds, used_pfds, -1);
	if (ready <= 0)
		return ready;

	ready = 0;
	/* FIXME: handle POLLHUP */
	for (idx = 0; idx < used_pfds; idx++) {
		if (pfds[idx].revents & POLLIN) {
			LOG_DBG("Data ready on %d", pfds[idx].fd);
			/* FIXME: Add this back return 1;*/
			ready++;
		}
	}

	return ready;
}
/*
 * links_issue_callbacks
 *
 * For every descriptor poll(2) flagged readable, look up its registered
 * callback and invoke it.  Callback return values are ignored.
 */
int links_issue_callbacks(void)
{
	int idx;
	struct link_callback *entry;

	for (idx = 0; idx < used_pfds; idx++) {
		if (!(pfds[idx].revents & POLLIN))
			continue;
		for (entry = callbacks; entry; entry = entry->next) {
			if (entry->fd != pfds[idx].fd)
				continue;
			LOG_DBG("Issuing callback on %s/%d",
				entry->name, entry->fd);
			entry->callback(entry->data);
			break;
		}
	}

	return 0;
}

View File

@ -1,11 +0,0 @@
/* $NetBSD: link_mon.h,v 1.1.1.1 2009/02/18 11:16:32 haad Exp $ */

/*
 * File-descriptor monitor: a poll(2) loop with per-fd callbacks
 * (implemented in link_mon.c, driven from main()).
 */
#ifndef __LINK_MON_DOT_H__
#define __LINK_MON_DOT_H__

/* Watch @fd; invoke @callback(@data) when it becomes readable. */
int links_register(int fd, char *name, int (*callback)(void *data), void *data);

/* Stop watching @fd and free its registration. */
int links_unregister(int fd);

/* Block until a watched descriptor is readable; returns the ready count. */
int links_monitor(void);

/* Run callbacks for every descriptor flagged by links_monitor(). */
int links_issue_callbacks(void);

#endif /* __LINK_MON_DOT_H__ */

View File

@ -1,473 +0,0 @@
/* $NetBSD: list.h,v 1.1.1.1 2009/02/18 11:16:32 haad Exp $ */

/*
 * Doubly linked list implementation imported from the Linux kernel.
 */
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */
struct list_head {
	struct list_head *next, *prev;
};

/* An empty list head points at itself in both directions. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* Poison the links so use-after-delete faults immediately. */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

/* Splice the (non-empty) @list into @head, right after @head itself. */
static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
/*
 * offsetof normally comes from <stddef.h>; guard the local fallback so
 * including that header alongside this one does not redefine the macro.
 */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

/**
 * container_of - cast a member of a structure out to the containing structure
 *
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 * Uses __typeof__ rather than plain typeof so the header also compiles
 * under strict ISO modes (e.g. -std=c11), where 'typeof' is not a keyword.
 */
#define container_of(ptr, type, member) ({			\
	const __typeof__( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/*
 * NOTE(review): several iterators below call prefetch(), which is not
 * defined in this header -- confirm the includer provides it.
 */

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member); \
		prefetch(pos->member.prev), &pos->member != (head); \
		pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 * list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 * continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = n, n = list_entry(n->member.next, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	/* pprev points at the previous node's 'next' field (or head->first). */
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

/* A node is "unhashed" when it is on no list (pprev is NULL). */
static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

/* Unlink @n without poisoning (internal helper). */
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison, as in list_del(): use-after-delete faults immediately. */
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (n->pprev) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
				    struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/* Insert @next directly after @n. */
static inline void hlist_add_after(struct hlist_node *n,
				   struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if(next->next)
		next->next->pprev = &next->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
		pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
		pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
	for (pos = (head)->first; \
		pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
		pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
	for (pos = (pos)->next; \
		pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
		pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
	for (; pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
		pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = (head)->first; \
		pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
		pos = n)

#endif

View File

@ -1,381 +0,0 @@
/* $NetBSD: local.c,v 1.1.1.1 2009/02/18 11:16:33 haad Exp $ */
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <linux/connector.h>
#include <linux/netlink.h>
#include "linux/dm-clog-tfr.h"
#include "functions.h"
#include "cluster.h"
#include "common.h"
#include "logging.h"
#include "link_mon.h"
#include "local.h"
static int cn_fd; /* Connector (netlink) socket fd */
static char recv_buf[2048];
/* FIXME: merge this function with kernel_send_helper */
/*
 * kernel_ack
 * @seq: sequence number of the kernel request being acknowledged
 * @error: positive errno to report, or 0 for success
 *
 * Sends an empty connector message whose 'ack' field carries @error.
 *
 * Returns: 0 on success, -EXXX on failure
 */
static int kernel_ack(uint32_t seq, int error)
{
	ssize_t sent;
	unsigned char buf[sizeof(struct nlmsghdr) + sizeof(struct cn_msg)];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *msg = NLMSG_DATA(nlh);

	if (error < 0) {
		LOG_ERROR("Programmer error: error codes must be positive");
		return -EINVAL;
	}

	memset(buf, 0, sizeof(buf));

	nlh->nlmsg_seq = 0;
	nlh->nlmsg_pid = getpid();
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct cn_msg));
	nlh->nlmsg_flags = 0;

	msg->len = 0;
	msg->id.idx = 0x4;	/* matches nl_groups used in init_local() */
	msg->id.val = 0x1;
	msg->seq = seq;
	msg->ack = error;

	sent = send(cn_fd, nlh, NLMSG_LENGTH(sizeof(struct cn_msg)), 0);
	/* FIXME: do better error processing */
	if (sent <= 0)
		return -EBADE;

	return 0;
}
/*
 * kernel_recv
 * @tfr: the newly allocated request from kernel
 *
 * Read requests from the kernel and allocate space for the new request.
 * If there is no request from the kernel, *tfr is NULL.
 *
 * This function is not thread safe due to returned stack pointer. In fact,
 * the returned pointer must not be in-use when this function is called again.
 *
 * Returns: 0 on success, -EXXX on error
 */
static int kernel_recv(struct clog_tfr **tfr)
{
	int r = 0;
	int len;
	struct cn_msg *msg;

	*tfr = NULL;
	memset(recv_buf, 0, sizeof(recv_buf));

	len = recv(cn_fd, recv_buf, sizeof(recv_buf), 0);
	if (len < 0) {
		LOG_ERROR("Failed to recv message from kernel");
		r = -errno;
		goto fail;
	}

	switch (((struct nlmsghdr *)recv_buf)->nlmsg_type) {
	case NLMSG_ERROR:
		LOG_ERROR("Unable to recv message from kernel: NLMSG_ERROR");
		r = -EBADE;
		goto fail;
	case NLMSG_DONE:
		/* Payload is a cn_msg; the clog request rides in msg->data. */
		msg = (struct cn_msg *)NLMSG_DATA((struct nlmsghdr *)recv_buf);
		len -= sizeof(struct nlmsghdr);

		if (len < sizeof(struct cn_msg)) {
			LOG_ERROR("Incomplete request from kernel received");
			r = -EBADE;
			goto fail;
		}

		if (msg->len > DM_CLOG_TFR_SIZE) {
			LOG_ERROR("Not enough space to receive kernel request (%d/%d)",
				  msg->len, DM_CLOG_TFR_SIZE);
			r = -EBADE;
			goto fail;
		}

		if (!msg->len)
			LOG_ERROR("Zero length message received");

		len -= sizeof(struct cn_msg);

		if (len < msg->len)
			LOG_ERROR("len = %d, msg->len = %d", len, msg->len);

		/* NOTE(review): writes one byte past the payload; if a
		 * request exactly filled recv_buf this would overrun --
		 * confirm DM_CLOG_TFR_SIZE leaves headroom in 2048 bytes. */
		msg->data[msg->len] = '\0'; /* Cleaner way to ensure this? */

		*tfr = (struct clog_tfr *)msg->data;

		if (!(*tfr)->request_type) {
			LOG_DBG("Bad transmission, requesting resend [%u]", msg->seq);
			r = -EAGAIN;

			/* NACK so the kernel retries this sequence number. */
			if (kernel_ack(msg->seq, EAGAIN)) {
				LOG_ERROR("Failed to NACK kernel transmission [%u]",
					  msg->seq);
				r = -EBADE;
			}
		}
		break;
	default:
		LOG_ERROR("Unknown nlmsg_type");
		r = -EBADE;
	}

fail:
	if (r)
		*tfr = NULL;

	/* A requested resend (-EAGAIN) is reported as "no request yet". */
	return (r == -EAGAIN) ? 0 : r;
}
/*
 * kernel_send_helper
 * @data: payload to deliver to the kernel
 * @out_size: number of bytes at @data
 *
 * Wraps @data in netlink + connector headers and sends it on cn_fd.
 *
 * Returns: 0 on success, -EXXX on failure
 */
static int kernel_send_helper(void *data, int out_size)
{
	int r;
	struct nlmsghdr *nlh;
	struct cn_msg *msg;
	unsigned char buf[2048];

	/*
	 * The original memcpy'd the payload into 'buf' unchecked; an
	 * over-sized request would overflow this stack buffer.  Reject
	 * anything that cannot fit behind the two headers.
	 */
	if (out_size < 0 ||
	    NLMSG_LENGTH(out_size + sizeof(struct cn_msg)) > sizeof(buf))
		return -E2BIG;

	memset(buf, 0, sizeof(buf));

	nlh = (struct nlmsghdr *)buf;
	nlh->nlmsg_seq = 0; /* FIXME: Is this used? */
	nlh->nlmsg_pid = getpid();
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_len = NLMSG_LENGTH(out_size + sizeof(struct cn_msg));
	nlh->nlmsg_flags = 0;

	msg = NLMSG_DATA(nlh);
	memcpy(msg->data, data, out_size);
	msg->len = out_size;
	msg->id.idx = 0x4;	/* matches nl_groups used in init_local() */
	msg->id.val = 0x1;
	msg->seq = 0;

	r = send(cn_fd, nlh, NLMSG_LENGTH(out_size + sizeof(struct cn_msg)), 0);
	/* FIXME: do better error processing */
	if (r <= 0)
		return -EBADE;

	return 0;
}
/*
 * do_local_work
 *
 * Any processing errors are placed in the 'tfr'
 * structure to be reported back to the kernel.
 * It may be pointless for this function to
 * return an int.
 *
 * Returns: 0 on success, -EXXX on failure
 */
static int do_local_work(void *data)
{
	int r;
	struct clog_tfr *tfr = NULL;

	r = kernel_recv(&tfr);
	if (r)
		return r;

	/* kernel_recv() may legitimately deliver no request. */
	if (!tfr)
		return 0;

	LOG_DBG("[%s] Request from kernel received: [%s/%u]",
		SHORT_UUID(tfr->uuid), RQ_TYPE(tfr->request_type),
		tfr->seq);

	switch (tfr->request_type) {
	case DM_CLOG_CTR:
	case DM_CLOG_DTR:
	case DM_CLOG_IN_SYNC:
	case DM_CLOG_GET_SYNC_COUNT:
	case DM_CLOG_STATUS_INFO:
	case DM_CLOG_STATUS_TABLE:
	case DM_CLOG_PRESUSPEND:
		/* Handled locally, then answered straight back. */
		/* We do not specify ourselves as server here */
		r = do_request(tfr, 0);
		if (r)
			LOG_DBG("Returning failed request to kernel [%s]",
				RQ_TYPE(tfr->request_type));

		r = kernel_send(tfr);
		if (r)
			LOG_ERROR("Failed to respond to kernel [%s]",
				  RQ_TYPE(tfr->request_type));
		break;
	case DM_CLOG_RESUME:
		/*
		 * Resume is a special case that requires a local
		 * component to join the CPG, and a cluster component
		 * to handle the request.
		 */
		r = local_resume(tfr);
		if (r) {
			LOG_DBG("Returning failed request to kernel [%s]",
				RQ_TYPE(tfr->request_type));

			r = kernel_send(tfr);
			if (r)
				LOG_ERROR("Failed to respond to kernel [%s]",
					  RQ_TYPE(tfr->request_type));
			break;
		}
		/* ELSE, fall through */
	case DM_CLOG_IS_CLEAN:
	case DM_CLOG_FLUSH:
	case DM_CLOG_MARK_REGION:
	case DM_CLOG_GET_RESYNC_WORK:
	case DM_CLOG_SET_REGION_SYNC:
	case DM_CLOG_IS_REMOTE_RECOVERING:
	case DM_CLOG_POSTSUSPEND:
		/* Forwarded to the cluster; failure is reported immediately. */
		r = cluster_send(tfr);
		if (r) {
			tfr->data_size = 0;
			tfr->error = r;
			kernel_send(tfr);
		}
		break;
	case DM_CLOG_CLEAR_REGION:
		/* ACK the kernel right away; cluster errors are dropped. */
		r = kernel_ack(tfr->seq, 0);

		r = cluster_send(tfr);
		if (r) {
			/*
			 * FIXME: store error for delivery on flush
			 * This would allow us to optimize MARK_REGION
			 * too.
			 */
		}
		break;
	case DM_CLOG_GET_REGION_SIZE:
	default:
		LOG_ERROR("Invalid log request received, ignoring.");
		return 0;
	}

	if (r && !tfr->error)
		tfr->error = r;

	return r;
}
/*
 * kernel_send
 * @tfr: result to pass back to kernel
 *
 * Returns the tfr structure (containing the results) to the kernel.
 *
 * WARNING: should the structure be freed if
 * there is an error? I vote 'yes'. If the
 * kernel doesn't get the response, it should
 * resend the request.
 *
 * Returns: 0 on success, -EXXX on failure
 */
int kernel_send(struct clog_tfr *tfr)
{
	int rv;
	int payload;

	if (!tfr)
		return -EINVAL;

	payload = sizeof(struct clog_tfr) + tfr->data_size;

	if (!tfr->data_size && !tfr->error) {
		/* An ACK is all that is needed */
		/* FIXME: add ACK code */
	} else if (payload > DM_CLOG_TFR_SIZE) {
		/*
		 * If we gotten here, we've already overrun
		 * our allotted space somewhere.
		 *
		 * We must do something, because the kernel
		 * is waiting for a response.
		 */
		LOG_ERROR("Not enough space to respond to server");
		tfr->error = -ENOSPC;
		payload = sizeof(struct clog_tfr);
	}

	rv = kernel_send_helper(tfr, payload);
	if (rv)
		LOG_ERROR("Failed to send msg to kernel.");

	return rv;
}
/*
 * init_local
 *
 * Initialize kernel communication socket (netlink)
 *
 * Opens a connector (CN) netlink socket, binds it to the multicast
 * group the kernel-side cluster log code transmits on, joins that
 * group, and registers the fd with the link monitor so that
 * do_local_work() runs whenever a request arrives.
 *
 * Returns: 0 on success, values from common.h on failure
 */
int init_local(void)
{
	int r = 0;
	int opt;
	struct sockaddr_nl addr;

	cn_fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (cn_fd < 0)
		return EXIT_KERNEL_TFR_SOCKET;

	/* memset to fix valgrind complaint */
	memset(&addr, 0, sizeof(struct sockaddr_nl));
	addr.nl_family = AF_NETLINK;
	/* 0x4: multicast group bitmask — presumably the group used by the
	   dm-clog kernel module; TODO confirm against dm-clog-tfr.h */
	addr.nl_groups = 0x4;
	addr.nl_pid = 0;	/* 0 = let the kernel assign our netlink port id */

	r = bind(cn_fd, (struct sockaddr *) &addr, sizeof(addr));
	if (r < 0) {
		close(cn_fd);
		return EXIT_KERNEL_TFR_BIND;
	}

	/* Join the multicast group.  270 is SOL_NETLINK spelled numerically
	   (NOTE(review): presumably to avoid depending on newer headers —
	   confirm). */
	opt = addr.nl_groups;
	r = setsockopt(cn_fd, 270, NETLINK_ADD_MEMBERSHIP, &opt, sizeof(opt));
	if (r) {
		close(cn_fd);
		return EXIT_KERNEL_TFR_SETSOCKOPT;
	}

	/* Left blocking on purpose?  NOTE(review): non-blocking mode is
	   commented out — confirm intent. */
	/*
	r = fcntl(cn_fd, F_SETFL, FNDELAY);
	*/

	/* Dispatch do_local_work() when cn_fd becomes readable */
	links_register(cn_fd, "local", do_local_work, NULL);

	return 0;
}
/*
 * cleanup_local
 *
 * Clean up before exiting
 *
 * Unregisters the netlink fd from the link monitor (stops event
 * dispatch) and then closes the socket opened by init_local().
 */
void cleanup_local(void)
{
	links_unregister(cn_fd);
	close(cn_fd);
}

View File

@ -1,11 +0,0 @@
/* $NetBSD: local.h,v 1.1.1.1 2009/02/18 11:16:33 haad Exp $ */

/*
 * Local (kernel-facing) communication interface of the cluster log
 * daemon: netlink socket setup/teardown plus response delivery.
 *
 * NOTE(review): this header references 'struct clog_tfr' without
 * declaring it — includers must have the definition in scope first.
 */
#ifndef __CLUSTER_LOG_LOCAL_DOT_H__
#define __CLUSTER_LOG_LOCAL_DOT_H__

/* Open/bind the netlink socket and register it for event dispatch. */
int init_local(void);
/* Unregister and close the netlink socket. */
void cleanup_local(void);
/* Return a (result-carrying) transfer structure to the kernel. */
int kernel_send(struct clog_tfr *tfr);

#endif /* __CLUSTER_LOG_LOCAL_DOT_H__ */

View File

@ -1,27 +0,0 @@
/* $NetBSD: logging.c,v 1.1.1.1 2009/02/18 11:16:33 haad Exp $ */

/*
 * Global state backing the logging macros declared in logging.h.
 * The conditional log_* flags are fixed at compile time via the
 * Makefile's -DMEMB / -DCKPT / -DRESEND switches.
 */
#include <syslog.h>

int log_tabbing = 0;	/* indentation depth used by LOG_OUTPUT() */
int log_is_open = 0;	/* set non-zero by LOG_OPEN() once syslog is open */

/*
 * Variables for various conditional logging
 */
#ifdef MEMB
int log_membership_change = 1;
#else
int log_membership_change = 0;
#endif

#ifdef CKPT
int log_checkpoint = 1;
#else
int log_checkpoint = 0;
#endif

#ifdef RESEND
int log_resend_requests = 1;
#else
int log_resend_requests = 0;
#endif

View File

@ -1,83 +0,0 @@
/* $NetBSD: logging.h,v 1.1.1.1 2009/02/18 11:16:33 haad Exp $ */

/*
 * Logging support for the cluster log daemon: 64-bit printf()/scanf()
 * conversion macros plus syslog/console output macros.
 */
#ifndef __CLUSTER_LOG_LOGGING_DOT_H__
#define __CLUSTER_LOG_LOGGING_DOT_H__

#include <stdio.h>
#include <string.h>	/* strlen() is used by SHORT_UUID() */
#include <syslog.h>

/*
 * 64-bit conversion specifiers.  BITS_PER_LONG is expected to be
 * supplied by the build; when it is unset (or not 64) the fallback
 * branch is taken.  The fallback uses the C99 "ll" length modifier;
 * the previous "L" modifier is defined only for long double and has
 * undefined behaviour with integer conversions.
 */
#if (BITS_PER_LONG == 64)
#define PRIu64 "lu"
#define PRId64 "ld"
#define PRIo64 "lo"
#define PRIx64 "lx"
#define PRIX64 "lX"
#define SCNu64 "lu"
#define SCNd64 "ld"
#define SCNo64 "lo"
#define SCNx64 "lx"
#define SCNX64 "lX"
#else
#define PRIu64 "llu"
#define PRId64 "lld"
#define PRIo64 "llo"
#define PRIx64 "llx"
#define PRIX64 "llX"
#define SCNu64 "llu"
#define SCNd64 "lld"
#define SCNo64 "llo"
#define SCNx64 "llx"
#define SCNX64 "llX"
#endif

/*
 * SHORT_UUID - print last 8 chars of a string.
 * The expansion is fully parenthesized so the macro is safe inside
 * larger expressions (the unparenthesized ternary previously bound
 * incorrectly, e.g. in 'SHORT_UUID(x) + 1').
 */
#define SHORT_UUID(x) ((strlen(x) > 8) ? ((x) + (strlen(x) - 8)) : (x))

extern int log_tabbing;			/* indentation depth for nested output */
extern int log_is_open;			/* non-zero once LOG_OPEN() has run */
extern int log_membership_change;	/* conditional-logging flags; */
extern int log_checkpoint;		/* compile-time configured in logging.c */
extern int log_resend_requests;

/* Open syslog and route subsequent LOG_OUTPUT() calls through it. */
#define LOG_OPEN(ident, option, facility) do { \
		openlog(ident, option, facility); \
		log_is_open = 1; \
	} while (0)

/* NOTE(review): the parameter is literally named 'void'; callers invoke
 * this as LOG_CLOSE() with an empty argument.  Kept as-is for
 * compatibility. */
#define LOG_CLOSE(void) do { \
		log_is_open = 0; \
		closelog(); \
	} while (0)

/*
 * LOG_OUTPUT - emit one log line at the given syslog 'level'.
 * Goes to syslog once LOG_OPEN() has run; before that, to the console
 * with log_tabbing levels of tab indentation (capped at 15 for syslog).
 * NOTE(review): level > LOG_NOTICE (info/debug) selects stderr while
 * errors go to stdout — looks inverted; confirm the routing is intended.
 */
#define LOG_OUTPUT(level, f, arg...) do { \
		int __i; \
		char __buffer[16]; \
		FILE *fp = (level > LOG_NOTICE) ? stderr : stdout; \
		if (log_is_open) { \
			for (__i = 0; (__i < log_tabbing) && (__i < 15); __i++) \
				__buffer[__i] = '\t'; \
			__buffer[__i] = '\0'; \
			syslog(level, "%s" f "\n", __buffer, ## arg); \
		} else { \
			for (__i = 0; __i < log_tabbing; __i++) \
				fprintf(fp, "\t"); \
			fprintf(fp, f "\n", ## arg); \
		} \
	} while (0)

/* Debug output compiles away entirely unless built with -DDEBUG. */
#ifdef DEBUG
#define LOG_DBG(f, arg...) LOG_OUTPUT(LOG_DEBUG, f, ## arg)
#else /* DEBUG */
#define LOG_DBG(f, arg...)
#endif /* DEBUG */

/* Log at NOTICE level only when runtime condition __X is set. */
#define LOG_COND(__X, f, arg...) do {\
		if (__X) { \
			LOG_OUTPUT(LOG_NOTICE, f, ## arg); \
		} \
	} while (0)

#define LOG_PRINT(f, arg...) LOG_OUTPUT(LOG_NOTICE, f, ## arg)
#define LOG_ERROR(f, arg...) LOG_OUTPUT(LOG_ERR, f, ## arg)

#endif /* __CLUSTER_LOG_LOGGING_DOT_H__ */

View File

@ -1,8 +1,8 @@
/* $NetBSD: dm-ioctl.h,v 1.3 2009/12/01 23:11:17 haad Exp $ */
/* $NetBSD: dm-ioctl.h,v 1.4 2009/12/02 00:58:02 haad Exp $ */
/*
* Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
* Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@ -127,6 +127,16 @@ struct dm_ioctl {
uint32_t target_count; /* in/out */
int32_t open_count; /* out */
uint32_t flags; /* in/out */
/*
* event_nr holds either the event number (input and output) or the
* udev cookie value (input only).
* The DM_DEV_WAIT ioctl takes an event number as input.
* The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
* use the field as a cookie to return in the DM_COOKIE
* variable with the uevents they issue.
* For output, the ioctls return the event number, not the cookie.
*/
uint32_t event_nr; /* in/out */
uint32_t padding;
@ -262,7 +272,7 @@ enum {
#define DM_VERSION_MAJOR 6
#define DM_VERSION_MINOR 15
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2008-04-23)"
#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@ -303,4 +313,11 @@ enum {
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
/*
* If set, any table information returned will relate to the inactive
* table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
* is set before using the data returned.
*/
#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
#endif /* _LINUX_DM_IOCTL_H */

View File

@ -1,55 +0,0 @@
/* $NetBSD: lvm2.h,v 1.1.1.1 2009/02/18 11:16:49 haad Exp $ */
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _LIB_LVM2_H
#define _LIB_LVM2_H

#include <stdint.h>

/*
 * Library Initialisation
 * FIXME: For now just #define lvm2_create() and lvm2_destroy() to
 * create_toolcontext() and destroy_toolcontext()
 */
struct arg;
struct cmd_context;

struct cmd_context *create_toolcontext(unsigned is_long_lived);
void destroy_toolcontext(struct cmd_context *cmd);

/*
 * lvm2_create
 lvm_handle_t lvm2_create(void);
 *
 * Description: Create an LVM2 handle used in many other APIs.
 *
 * Returns:
 * NULL: Fail - unable to initialise handle.
 * non-NULL: Success - valid LVM2 handle returned
 */
/* NOTE(review): the macro ignores its argument X and always requests a
 * long-lived context (is_long_lived = 1). */
#define lvm2_create(X) create_toolcontext(1)

/*
 * lvm2_destroy
 void lvm2_destroy(lvm_handle_t h);
 *
 * Description: Destroy an LVM2 handle allocated with lvm2_create
 *
 * Parameters:
 * - h (IN): handle obtained from lvm2_create
 */
#define lvm2_destroy(X) destroy_toolcontext(X)

#endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: fs.c,v 1.3 2009/02/18 12:16:13 haad Exp $ */
/* $NetBSD: fs.c,v 1.4 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
@ -31,6 +31,7 @@
static int _mk_dir(const char *dev_dir, const char *vg_name)
{
char vg_path[PATH_MAX];
mode_t old_umask;
if (dm_snprintf(vg_path, sizeof(vg_path), "%s%s",
dev_dir, vg_name) == -1) {
@ -43,10 +44,14 @@ static int _mk_dir(const char *dev_dir, const char *vg_name)
return 1;
log_very_verbose("Creating directory %s", vg_path);
old_umask = umask(DM_DEV_DIR_UMASK);
if (mkdir(vg_path, 0777)) {
log_sys_error("mkdir", vg_path);
umask(old_umask);
return 0;
}
umask(old_umask);
return 1;
}
@ -123,7 +128,7 @@ static int _mk_link(const char *dev_dir, const char *vg_name,
{
char lv_path[PATH_MAX], link_path[PATH_MAX], lvm1_group_path[PATH_MAX];
char vg_path[PATH_MAX];
struct stat buf;
struct stat buf, buf_lp;
#ifdef __NetBSD__
/* Add support for creating links to BSD raw devices */
@ -218,12 +223,34 @@ static int _mk_link(const char *dev_dir, const char *vg_name,
return 0;
}
if (dm_udev_get_sync_support()) {
/* Check udev created the correct link. */
if (!stat(link_path, &buf_lp) &&
!stat(lv_path, &buf)) {
if (buf_lp.st_rdev == buf.st_rdev)
return 1;
else
log_warn("Symlink %s that should have been "
"created by udev does not have "
"correct target. Falling back to "
"direct link creation", lv_path);
} else
log_warn("Symlink %s that should have been "
"created by udev could not be checked "
"for its correctness. Falling back to "
"direct link creation.", lv_path);
}
log_very_verbose("Removing %s", lv_path);
if (unlink(lv_path) < 0) {
log_sys_error("unlink", lv_path);
return 0;
}
}
} else if (dm_udev_get_sync_support())
log_warn("The link %s should had been created by udev "
"but it was not found. Falling back to "
"direct link creation.", lv_path);
log_very_verbose("Linking %s -> %s", lv_path, link_path);
if (symlink(link_path, lv_path) < 0) {
@ -231,10 +258,8 @@ static int _mk_link(const char *dev_dir, const char *vg_name,
return 0;
}
#ifdef HAVE_SELINUX
if (!dm_set_selinux_context(lv_path, S_IFLNK))
return_0;
#endif
return 1;
}
@ -274,9 +299,14 @@ static int _rm_link(const char *dev_dir, const char *vg_name,
return 0;
}
if (lstat(lv_path, &buf) || !S_ISLNK(buf.st_mode)) {
if (errno == ENOENT)
return 1;
if (lstat(lv_path, &buf) && errno == ENOENT)
return 1;
else if (dm_udev_get_sync_support())
log_warn("The link %s should have been removed by udev "
"but it is still present. Falling back to "
"direct link removal.", lv_path);
if (!S_ISLNK(buf.st_mode)) {
log_error("%s not symbolic link - not removing", lv_path);
return 0;
}

View File

@ -1,8 +1,8 @@
/* $NetBSD: toolcontext.c,v 1.4 2009/02/18 12:16:13 haad Exp $ */
/* $NetBSD: toolcontext.c,v 1.5 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -66,7 +66,7 @@ static int _get_env_vars(struct cmd_context *cmd)
/* Set to "" to avoid using any system directory */
if ((e = getenv("LVM_SYSTEM_DIR"))) {
if (dm_snprintf(cmd->sys_dir, sizeof(cmd->sys_dir),
if (dm_snprintf(cmd->system_dir, sizeof(cmd->system_dir),
"%s", e) < 0) {
log_error("LVM_SYSTEM_DIR environment variable "
"is too long.");
@ -192,7 +192,7 @@ static void _init_logging(struct cmd_context *cmd)
/* Tell device-mapper about our logging */
#ifdef DEVMAPPER_SUPPORT
dm_log_init(print_log);
dm_log_with_errno_init(print_log);
#endif
}
@ -267,6 +267,10 @@ static int _process_config(struct cmd_context *cmd)
return 0;
}
cmd->default_settings.udev_sync = find_config_tree_int(cmd,
"activation/udev_sync",
DEFAULT_UDEV_SYNC);
cmd->stripe_filler = find_config_tree_str(cmd,
"activation/missing_stripe_filler",
DEFAULT_STRIPE_FILLER);
@ -291,6 +295,10 @@ static int _process_config(struct cmd_context *cmd)
}
}
cmd->si_unit_consistency = find_config_tree_int(cmd,
"global/si_unit_consistency",
DEFAULT_SI_UNIT_CONSISTENCY);
return 1;
}
@ -395,7 +403,7 @@ static int _load_config_file(struct cmd_context *cmd, const char *tag)
filler = "_";
if (dm_snprintf(config_file, sizeof(config_file), "%s/lvm%s%s.conf",
cmd->sys_dir, filler, tag) < 0) {
cmd->system_dir, filler, tag) < 0) {
log_error("LVM_SYSTEM_DIR or tag was too long");
return 0;
}
@ -444,7 +452,7 @@ static int _load_config_file(struct cmd_context *cmd, const char *tag)
static int _init_lvm_conf(struct cmd_context *cmd)
{
/* No config file if LVM_SYSTEM_DIR is empty */
if (!*cmd->sys_dir) {
if (!*cmd->system_dir) {
if (!(cmd->cft = create_config_tree(NULL, 0))) {
log_error("Failed to create config tree");
return 0;
@ -518,13 +526,15 @@ static void _destroy_tag_configs(struct cmd_context *cmd)
{
struct config_tree_list *cfl;
if (cmd->cft && cmd->cft->root) {
destroy_config_tree(cmd->cft);
cmd->cft = NULL;
dm_list_iterate_items(cfl, &cmd->config_files) {
if (cfl->cft == cmd->cft)
cmd->cft = NULL;
destroy_config_tree(cfl->cft);
}
dm_list_iterate_items(cfl, &cmd->config_files) {
destroy_config_tree(cfl->cft);
if (cmd->cft) {
destroy_config_tree(cmd->cft);
cmd->cft = NULL;
}
dm_list_init(&cmd->config_files);
@ -665,7 +675,7 @@ static int _init_filters(struct cmd_context *cmd, unsigned load_persistent_cache
if (cache_dir || cache_file_prefix) {
if (dm_snprintf(cache_file, sizeof(cache_file),
"%s%s%s/%s.cache",
cache_dir ? "" : cmd->sys_dir,
cache_dir ? "" : cmd->system_dir,
cache_dir ? "" : "/",
cache_dir ? : DEFAULT_CACHE_SUBDIR,
cache_file_prefix ? : DEFAULT_CACHE_FILE_PREFIX) < 0) {
@ -675,7 +685,7 @@ static int _init_filters(struct cmd_context *cmd, unsigned load_persistent_cache
} else if (!(dev_cache = find_config_tree_str(cmd, "devices/cache", NULL)) &&
(dm_snprintf(cache_file, sizeof(cache_file),
"%s/%s/%s.cache",
cmd->sys_dir, DEFAULT_CACHE_SUBDIR,
cmd->system_dir, DEFAULT_CACHE_SUBDIR,
DEFAULT_CACHE_FILE_PREFIX) < 0)) {
log_error("Persistent cache filename too long.");
return 0;
@ -693,7 +703,7 @@ static int _init_filters(struct cmd_context *cmd, unsigned load_persistent_cache
if (find_config_tree_int(cmd, "devices/write_cache_state", 1))
cmd->dump_filter = 1;
if (!*cmd->sys_dir)
if (!*cmd->system_dir)
cmd->dump_filter = 0;
/*
@ -806,9 +816,57 @@ int init_lvmcache_orphans(struct cmd_context *cmd)
return 1;
}
/* Context handed to shared-library segment-type init functions. */
struct segtype_library {
	struct cmd_context *cmd;
	void *lib;		/* handle from load_shared_library() (dlopen) */
	const char *libname;	/* library path, used in error messages */
};
/*
 * lvm_register_segtype
 *
 * Attach a freshly-initialized segment type from a shared library to
 * the command context's segtype list.  If a segtype with the same name
 * is already registered, the new one is destroyed and 0 is returned
 * (the caller is expected to unload the library).
 *
 * Returns: 1 on success, 0 on duplicate name
 */
int lvm_register_segtype(struct segtype_library *seglib,
			 struct segment_type *segtype)
{
	struct segment_type *segtype2;

	/* Record which library and command context own this segtype */
	segtype->library = seglib->lib;
	segtype->cmd = seglib->cmd;

	dm_list_iterate_items(segtype2, &seglib->cmd->segtypes) {
		if (strcmp(segtype2->name, segtype->name))
			continue;
		log_error("Duplicate segment type %s: "
			  "unloading shared library %s",
			  segtype->name, seglib->libname);
		/* Ownership stays with us on failure: destroy it here */
		segtype->ops->destroy(segtype);
		return 0;
	}

	dm_list_add(&seglib->cmd->segtypes, &segtype->list);

	return 1;
}
/*
 * _init_single_segtype
 *
 * Adapter for legacy segtype libraries that export a single
 * 'init_segtype' entry point: look it up with dlsym(), run it, and
 * register the resulting segtype.  Its signature matches the
 * init_multiple_segtypes() entry point so callers can use either.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _init_single_segtype(struct cmd_context *cmd,
				struct segtype_library *seglib)
{
	struct segment_type *(*init_segtype_fn) (struct cmd_context *);
	struct segment_type *segtype;

	if (!(init_segtype_fn = dlsym(seglib->lib, "init_segtype"))) {
		log_error("Shared library %s does not contain segment type "
			  "functions", seglib->libname);
		return 0;
	}

	if (!(segtype = init_segtype_fn(seglib->cmd)))
		return_0;

	return lvm_register_segtype(seglib, segtype);
}
static int _init_segtypes(struct cmd_context *cmd)
{
struct segment_type *segtype;
struct segtype_library seglib = { .cmd = cmd };
#ifdef HAVE_LIBDL
const struct config_node *cn;
@ -854,9 +912,8 @@ static int _init_segtypes(struct cmd_context *cmd)
(cn = find_config_tree_node(cmd, "global/segment_libraries"))) {
struct config_value *cv;
struct segment_type *(*init_segtype_fn) (struct cmd_context *);
void *lib;
struct segment_type *segtype2;
int (*init_multiple_segtypes_fn) (struct cmd_context *,
struct segtype_library *);
for (cv = cn->v; cv; cv = cv->next) {
if (cv->type != CFG_STRING) {
@ -864,32 +921,37 @@ static int _init_segtypes(struct cmd_context *cmd)
"global/segment_libraries");
return 0;
}
if (!(lib = load_shared_library(cmd, cv->v.str,
seglib.libname = cv->v.str;
if (!(seglib.lib = load_shared_library(cmd,
seglib.libname,
"segment type", 0)))
return_0;
if (!(init_segtype_fn = dlsym(lib, "init_segtype"))) {
log_error("Shared library %s does not contain "
"segment type functions", cv->v.str);
dlclose(lib);
return 0;
}
if (!(segtype = init_segtype_fn(cmd)))
return 0;
segtype->library = lib;
dm_list_add(&cmd->segtypes, &segtype->list);
dm_list_iterate_items(segtype2, &cmd->segtypes) {
if ((segtype == segtype2) ||
strcmp(segtype2->name, segtype->name))
continue;
log_error("Duplicate segment type %s: "
"unloading shared library %s",
segtype->name, cv->v.str);
dm_list_del(&segtype->list);
segtype->ops->destroy(segtype);
dlclose(lib);
if ((init_multiple_segtypes_fn =
dlsym(seglib.lib, "init_multiple_segtypes"))) {
if (dlsym(seglib.lib, "init_segtype"))
log_warn("WARNING: Shared lib %s has "
"conflicting init fns. Using"
" init_multiple_segtypes().",
seglib.libname);
} else
init_multiple_segtypes_fn =
_init_single_segtype;
if (!init_multiple_segtypes_fn(cmd, &seglib)) {
struct dm_list *sgtl, *tmp;
log_error("init_multiple_segtypes() failed: "
"Unloading shared library %s",
seglib.libname);
dm_list_iterate_safe(sgtl, tmp, &cmd->segtypes) {
segtype = dm_list_item(sgtl, struct segment_type);
if (segtype->library == seglib.lib) {
dm_list_del(&segtype->list);
segtype->ops->destroy(segtype);
}
}
dlclose(seglib.lib);
return_0;
}
}
}
@ -926,7 +988,7 @@ static int _init_backup(struct cmd_context *cmd)
char default_dir[PATH_MAX];
const char *dir;
if (!cmd->sys_dir) {
if (!cmd->system_dir) {
log_warn("WARNING: Metadata changes will NOT be backed up");
backup_init(cmd, "", 0);
archive_init(cmd, "", 0, 0, 0);
@ -945,10 +1007,10 @@ static int _init_backup(struct cmd_context *cmd)
DEFAULT_ARCHIVE_NUMBER);
if (dm_snprintf
(default_dir, sizeof(default_dir), "%s/%s", cmd->sys_dir,
(default_dir, sizeof(default_dir), "%s/%s", cmd->system_dir,
DEFAULT_ARCHIVE_SUBDIR) == -1) {
log_err("Couldn't create default archive path '%s/%s'.",
cmd->sys_dir, DEFAULT_ARCHIVE_SUBDIR);
log_error("Couldn't create default archive path '%s/%s'.",
cmd->system_dir, DEFAULT_ARCHIVE_SUBDIR);
return 0;
}
@ -957,7 +1019,7 @@ static int _init_backup(struct cmd_context *cmd)
if (!archive_init(cmd, dir, days, min,
cmd->default_settings.archive)) {
log_debug("backup_init failed.");
log_debug("archive_init failed.");
return 0;
}
@ -967,10 +1029,10 @@ static int _init_backup(struct cmd_context *cmd)
DEFAULT_BACKUP_ENABLED);
if (dm_snprintf
(default_dir, sizeof(default_dir), "%s/%s", cmd->sys_dir,
(default_dir, sizeof(default_dir), "%s/%s", cmd->system_dir,
DEFAULT_BACKUP_SUBDIR) == -1) {
log_err("Couldn't create default backup path '%s/%s'.",
cmd->sys_dir, DEFAULT_BACKUP_SUBDIR);
log_error("Couldn't create default backup path '%s/%s'.",
cmd->system_dir, DEFAULT_BACKUP_SUBDIR);
return 0;
}
@ -1000,7 +1062,8 @@ static void _init_globals(struct cmd_context *cmd)
}
/* Entry point */
struct cmd_context *create_toolcontext(unsigned is_long_lived)
struct cmd_context *create_toolcontext(unsigned is_long_lived,
const char *system_dir)
{
struct cmd_context *cmd;
@ -1024,75 +1087,85 @@ struct cmd_context *create_toolcontext(unsigned is_long_lived)
memset(cmd, 0, sizeof(*cmd));
cmd->is_long_lived = is_long_lived;
cmd->handles_missing_pvs = 0;
cmd->handles_unknown_segments = 0;
cmd->hosttags = 0;
dm_list_init(&cmd->formats);
dm_list_init(&cmd->segtypes);
dm_list_init(&cmd->tags);
dm_list_init(&cmd->config_files);
strcpy(cmd->sys_dir, DEFAULT_SYS_DIR);
/* FIXME Make this configurable? */
reset_lvm_errno(1);
/*
* Environment variable LVM_SYSTEM_DIR overrides this below.
*/
if (system_dir)
strncpy(cmd->system_dir, system_dir, sizeof(cmd->system_dir) - 1);
else
strcpy(cmd->system_dir, DEFAULT_SYS_DIR);
if (!_get_env_vars(cmd))
goto error;
goto_out;
/* Create system directory if it doesn't already exist */
if (*cmd->sys_dir && !dm_create_dir(cmd->sys_dir)) {
if (*cmd->system_dir && !dm_create_dir(cmd->system_dir)) {
log_error("Failed to create LVM2 system dir for metadata backups, config "
"files and internal cache.");
log_error("Set environment variable LVM_SYSTEM_DIR to alternative location "
"or empty string.");
goto error;
goto out;
}
if (!(cmd->libmem = dm_pool_create("library", 4 * 1024))) {
log_error("Library memory pool creation failed");
goto error;
goto out;
}
if (!_init_lvm_conf(cmd))
goto error;
goto_out;
_init_logging(cmd);
if (!_init_hostname(cmd))
goto error;
goto_out;
if (!_init_tags(cmd, cmd->cft))
goto error;
goto_out;
if (!_init_tag_configs(cmd))
goto error;
goto_out;
if (!_merge_config_files(cmd))
goto error;
goto_out;
if (!_process_config(cmd))
goto error;
goto_out;
if (!_init_dev_cache(cmd))
goto error;
goto_out;
if (!_init_filters(cmd, 1))
goto error;
goto_out;
if (!(cmd->mem = dm_pool_create("command", 4 * 1024))) {
log_error("Command memory pool creation failed");
goto error;
goto out;
}
memlock_init(cmd);
if (!_init_formats(cmd))
goto error;
goto_out;
if (!init_lvmcache_orphans(cmd))
goto error;
goto_out;
if (!_init_segtypes(cmd))
goto error;
goto_out;
if (!_init_backup(cmd))
goto error;
goto_out;
_init_rand(cmd);
@ -1102,11 +1175,8 @@ struct cmd_context *create_toolcontext(unsigned is_long_lived)
cmd->current_settings = cmd->default_settings;
cmd->config_valid = 1;
out:
return cmd;
error:
dm_free(cmd);
return NULL;
}
static void _destroy_formats(struct dm_list *formats)
@ -1139,12 +1209,32 @@ static void _destroy_segtypes(struct dm_list *segtypes)
lib = segtype->library;
segtype->ops->destroy(segtype);
#ifdef HAVE_LIBDL
if (lib)
/*
* If no segtypes remain from this library, close it.
*/
if (lib) {
struct segment_type *segtype2;
dm_list_iterate_items(segtype2, segtypes)
if (segtype2->library == lib)
goto skip_dlclose;
dlclose(lib);
skip_dlclose:
;
}
#endif
}
}
int refresh_filters(struct cmd_context *cmd)
{
if (cmd->filter) {
cmd->filter->destroy(cmd->filter);
cmd->filter = NULL;
}
return _init_filters(cmd, 0);
}
int refresh_toolcontext(struct cmd_context *cmd)
{
log_verbose("Reloading config files");
@ -1203,14 +1293,9 @@ int refresh_toolcontext(struct cmd_context *cmd)
if (!_init_segtypes(cmd))
return 0;
/*
* If we are a long-lived process, write out the updated persistent
* device cache for the benefit of short-lived processes.
*/
if (cmd->is_long_lived && cmd->dump_filter)
persistent_filter_dump(cmd->filter);
cmd->config_valid = 1;
reset_lvm_errno(1);
return 1;
}
@ -1225,16 +1310,20 @@ void destroy_toolcontext(struct cmd_context *cmd)
label_exit();
_destroy_segtypes(&cmd->segtypes);
_destroy_formats(&cmd->formats);
cmd->filter->destroy(cmd->filter);
dm_pool_destroy(cmd->mem);
if (cmd->filter)
cmd->filter->destroy(cmd->filter);
if (cmd->mem)
dm_pool_destroy(cmd->mem);
dev_cache_exit();
_destroy_tags(cmd);
_destroy_tag_configs(cmd);
dm_pool_destroy(cmd->libmem);
if (cmd->libmem)
dm_pool_destroy(cmd->libmem);
dm_free(cmd);
release_log_memory();
activation_exit();
fin_log();
fin_syslog();
reset_lvm_errno(0);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: dev-cache.c,v 1.3 2009/10/16 21:00:41 joerg Exp $ */
/* $NetBSD: dev-cache.c,v 1.4 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
@ -110,6 +110,7 @@ struct device *dev_create_file(const char *filename, struct device *dev,
dev->fd = -1;
dev->open_count = 0;
dev->block_size = -1;
dev->read_ahead = -1;
memset(dev->pvid, 0, sizeof(dev->pvid));
dm_list_init(&dev->open_list);
@ -130,6 +131,7 @@ static struct device *_dev_create(dev_t d)
dev->fd = -1;
dev->open_count = 0;
dev->block_size = -1;
dev->read_ahead = -1;
dev->end = UINT64_C(0);
memset(dev->pvid, 0, sizeof(dev->pvid));
dm_list_init(&dev->open_list);
@ -302,19 +304,19 @@ static int _insert_dev(const char *path, dev_t d)
return_0;
if (!(btree_insert(_cache.devices, (uint32_t) d, dev))) {
log_err("Couldn't insert device into binary tree.");
log_error("Couldn't insert device into binary tree.");
_free(dev);
return 0;
}
}
if (!loopfile && !_add_alias(dev, path)) {
log_err("Couldn't add alias to dev cache.");
log_error("Couldn't add alias to dev cache.");
return 0;
}
if (!dm_hash_insert(_cache.names, path, dev)) {
log_err("Couldn't add name to hash in dev cache.");
log_error("Couldn't add name to hash in dev cache.");
return 0;
}
@ -560,7 +562,7 @@ int dev_cache_init(struct cmd_context *cmd)
}
if (!(_cache.devices = btree_create(_cache.mem))) {
log_err("Couldn't create binary tree for dev-cache.");
log_error("Couldn't create binary tree for dev-cache.");
goto bad;
}
@ -580,7 +582,7 @@ int dev_cache_init(struct cmd_context *cmd)
static void _check_closed(struct device *dev)
{
if (dev->fd >= 0)
log_err("Device '%s' has been left open.", dev_name(dev));
log_error("Device '%s' has been left open.", dev_name(dev));
}
static void _check_for_open_devices(void)

View File

@ -1,4 +1,4 @@
/* $NetBSD: dev-io.c,v 1.4 2009/02/18 12:16:13 haad Exp $ */
/* $NetBSD: dev-io.c,v 1.5 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
@ -304,6 +304,37 @@ static int _dev_get_size_dev(const struct device *dev, uint64_t *size)
return 1;
}
/*
 * _dev_read_ahead_dev
 *
 * Report the device's read-ahead setting through *read_ahead
 * (in sectors, as returned by the BLKRAGET ioctl).  The value is
 * cached in dev->read_ahead; -1 means "not yet queried".
 *
 * Returns: 1 on success, 0 on failure
 */
static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
{
	long read_ahead_long;

	/* Serve from the per-device cache if already queried */
	if (dev->read_ahead != -1) {
		*read_ahead = (uint32_t) dev->read_ahead;
		return 1;
	}

	if (!dev_open(dev))
		return_0;

	if (ioctl(dev->fd, BLKRAGET, &read_ahead_long) < 0) {
		log_sys_error("ioctl BLKRAGET", dev_name(dev));
		if (!dev_close(dev))
			stack;
		return 0;
	}

	if (!dev_close(dev))
		stack;

	*read_ahead = (uint32_t) read_ahead_long;
	dev->read_ahead = read_ahead_long;

	log_very_verbose("%s: read_ahead is %u sectors",
			 dev_name(dev), *read_ahead);

	return 1;
}
/*-----------------------------------------------------------------
* Public functions
*---------------------------------------------------------------*/
@ -319,6 +350,19 @@ int dev_get_size(const struct device *dev, uint64_t *size)
return _dev_get_size_dev(dev, size);
}
/*
 * dev_get_read_ahead
 *
 * Public wrapper around _dev_read_ahead_dev(): devices flagged
 * DEV_REGULAR (presumably regular files rather than block devices —
 * confirm against dev_create_file()) report a read-ahead of 0.
 *
 * Returns: 1 on success, 0 on failure (including dev == NULL)
 */
int dev_get_read_ahead(struct device *dev, uint32_t *read_ahead)
{
	if (!dev)
		return 0;

	if (dev->flags & DEV_REGULAR) {
		*read_ahead = 0;
		return 1;
	}

	return _dev_read_ahead_dev(dev, read_ahead);
}
/* FIXME Unused
int dev_get_sectsize(struct device *dev, uint32_t *size)
{

View File

@ -1,55 +0,0 @@
/* $NetBSD: lvm2.h,v 1.1.1.1 2009/02/18 11:16:49 haad Exp $ */
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _LIB_LVM2_H
#define _LIB_LVM2_H

#include <stdint.h>

/*
 * Library Initialisation
 * FIXME: For now just #define lvm2_create() and lvm2_destroy() to
 * create_toolcontext() and destroy_toolcontext()
 */
struct arg;
struct cmd_context;

struct cmd_context *create_toolcontext(unsigned is_long_lived);
void destroy_toolcontext(struct cmd_context *cmd);

/*
 * lvm2_create
 lvm_handle_t lvm2_create(void);
 *
 * Description: Create an LVM2 handle used in many other APIs.
 *
 * Returns:
 * NULL: Fail - unable to initialise handle.
 * non-NULL: Success - valid LVM2 handle returned
 */
/* NOTE(review): the macro ignores its argument X and always requests a
 * long-lived context (is_long_lived = 1). */
#define lvm2_create(X) create_toolcontext(1)

/*
 * lvm2_destroy
 void lvm2_destroy(lvm_handle_t h);
 *
 * Description: Destroy an LVM2 handle allocated with lvm2_create
 *
 * Parameters:
 * - h (IN): handle obtained from lvm2_create
 */
#define lvm2_destroy(X) destroy_toolcontext(X)

#endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: lv_manip.c,v 1.3 2009/02/18 12:16:13 haad Exp $ */
/* $NetBSD: lv_manip.c,v 1.4 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
@ -27,6 +27,7 @@
#include "segtype.h"
#include "archiver.h"
#include "activate.h"
#include "str_list.h"
struct lv_names {
const char *old;
@ -404,7 +405,6 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
*/
static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
{
struct lv_list *lvl;
struct lv_segment *seg;
uint32_t count = extents;
uint32_t reduction;
@ -435,15 +435,9 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
return 1;
/* Remove the LV if it is now empty */
if (!lv->le_count) {
if (!(lvl = find_lv_in_vg(lv->vg, lv->name)))
return_0;
dm_list_del(&lvl->list);
if (!(lv->status & SNAPSHOT))
lv->vg->lv_count--;
} else if (lv->vg->fid->fmt->ops->lv_setup &&
if (!lv->le_count && !unlink_lv_from_vg(lv))
return_0;
else if (lv->vg->fid->fmt->ops->lv_setup &&
!lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
return_0;
@ -613,12 +607,12 @@ static int _log_parallel_areas(struct dm_pool *mem, struct dm_list *parallel_are
if (!parallel_areas)
return 1;
if (!dm_pool_begin_object(mem, 256)) {
log_error("dm_pool_begin_object failed");
return 0;
}
dm_list_iterate_items(spvs, parallel_areas) {
if (!dm_pool_begin_object(mem, 256)) {
log_error("dm_pool_begin_object failed");
return 0;
}
dm_list_iterate_items(pvl, &spvs->pvs) {
if (!dm_pool_grow_object(mem, pv_dev_name(pvl->pv), strlen(pv_dev_name(pvl->pv)))) {
log_error("dm_pool_grow_object failed");
@ -737,7 +731,8 @@ static uint32_t mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint3
*/
static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
struct pv_area **areas,
uint32_t *ix, struct pv_area *log_area)
uint32_t *ix, struct pv_area *log_area,
uint32_t log_len)
{
uint32_t area_len, remaining;
uint32_t s;
@ -772,9 +767,7 @@ static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
if (log_area) {
ah->log_area.pv = log_area->map->pv;
ah->log_area.pe = log_area->start;
ah->log_area.len = mirror_log_extents(ah->log_region_size,
pv_pe_size(log_area->map->pv),
area_len);
ah->log_area.len = log_len;
consume_pv_area(log_area, ah->log_area.len);
}
@ -999,11 +992,15 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
unsigned contiguous = 0, cling = 0, preferred_count = 0;
unsigned ix;
unsigned ix_offset = 0; /* Offset for non-preferred allocations */
unsigned too_small_for_log_count; /* How many too small for log? */
uint32_t max_parallel; /* Maximum extents to allocate */
uint32_t next_le;
struct seg_pvs *spvs;
struct dm_list *parallel_pvs;
uint32_t free_pes;
uint32_t log_len;
struct pv_area *log_area;
unsigned log_needs_allocating;
/* Is there enough total space? */
free_pes = pv_maps_size(pvms);
@ -1130,11 +1127,11 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
if ((contiguous || cling) && (preferred_count < ix_offset))
break;
/* Only allocate log_area the first time around */
log_needs_allocating = (ah->log_count && !ah->log_area.len) ?
1 : 0;
if (ix + ix_offset < ah->area_count +
((ah->log_count && !ah->log_area.len) ?
ah->log_count : 0))
/* FIXME With ALLOC_ANYWHERE, need to split areas */
(log_needs_allocating ? ah->log_count : 0))
break;
/* sort the areas so we allocate from the biggest */
@ -1142,13 +1139,41 @@ static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
qsort(areas + ix_offset, ix, sizeof(*areas),
_comp_area);
/* First time around, use smallest area as log_area */
/* FIXME decide which PV to use at top of function instead */
if (!_alloc_parallel_area(ah, max_parallel, areas,
allocated,
(ah->log_count && !ah->log_area.len) ?
*(areas + ix_offset + ix - 1) :
NULL))
/*
* First time around, if there's a log, allocate it on the
* smallest device that has space for it.
*
* FIXME decide which PV to use at top of function instead
*/
too_small_for_log_count = 0;
if (!log_needs_allocating) {
log_len = 0;
log_area = NULL;
} else {
log_len = mirror_log_extents(ah->log_region_size,
pv_pe_size((*areas)->map->pv),
(max_parallel - *allocated) / ah->area_multiple);
/* How many areas are too small for the log? */
while (too_small_for_log_count < ix_offset + ix &&
(*(areas + ix_offset + ix - 1 -
too_small_for_log_count))->count < log_len)
too_small_for_log_count++;
log_area = *(areas + ix_offset + ix - 1 -
too_small_for_log_count);
}
if (ix + ix_offset < ah->area_count +
(log_needs_allocating ? ah->log_count +
too_small_for_log_count : 0))
/* FIXME With ALLOC_ANYWHERE, need to split areas */
break;
if (!_alloc_parallel_area(ah, max_parallel, areas, allocated,
log_area, log_len))
return_0;
} while (!contiguous && *allocated != needed && can_split);
@ -1215,7 +1240,7 @@ static int _allocate(struct alloc_handle *ah,
/* Allocate an array of pv_areas to hold the largest space on each PV */
if (!(areas = dm_malloc(sizeof(*areas) * areas_size))) {
log_err("Couldn't allocate areas array.");
log_error("Couldn't allocate areas array.");
return 0;
}
@ -1353,8 +1378,8 @@ int lv_add_segment(struct alloc_handle *ah,
return_0;
if ((segtype->flags & SEG_CAN_SPLIT) && !lv_merge_segments(lv)) {
log_err("Couldn't merge segments after extending "
"logical volume.");
log_error("Couldn't merge segments after extending "
"logical volume.");
return 0;
}
@ -1512,7 +1537,7 @@ int lv_add_mirror_lvs(struct logical_volume *lv,
if (!set_lv_segment_area_lv(seg, m, sub_lvs[m - old_area_count],
0, status))
return_0;
sub_lvs[m - old_area_count]->status &= ~VISIBLE_LV;
lv_set_hidden(sub_lvs[m - old_area_count]);
}
lv->status |= MIRRORED;
@ -1610,16 +1635,12 @@ int lv_extend(struct logical_volume *lv,
extents, allocatable_pvs, alloc, NULL)))
return_0;
if (mirrors < 2) {
if (!lv_add_segment(ah, 0, ah->area_count, lv, segtype, stripe_size,
status, 0, NULL))
goto_out;
} else {
if (!_lv_extend_mirror(ah, lv, extents, 0))
return_0;
}
if (mirrors < 2)
r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
stripe_size, status, 0, NULL);
else
r = _lv_extend_mirror(ah, lv, extents, 0);
out:
alloc_destroy(ah);
return r;
}
@ -1713,19 +1734,24 @@ static int _for_each_sub_lv(struct cmd_context *cmd, struct logical_volume *lv,
void *data),
void *data)
{
struct logical_volume *org;
struct lv_segment *seg;
uint32_t s;
if (lv_is_cow(lv) && lv_is_virtual_origin(org = origin_from_cow(lv)))
if (!func(cmd, org, data))
return_0;
dm_list_iterate_items(seg, &lv->segments) {
if (seg->log_lv && !func(cmd, seg->log_lv, data))
return 0;
return_0;
for (s = 0; s < seg->area_count; s++) {
if (seg_type(seg, s) != AREA_LV)
continue;
if (!func(cmd, seg_lv(seg, s), data))
return 0;
return_0;
if (!_for_each_sub_lv(cmd, seg_lv(seg, s), func, data))
return 0;
return_0;
}
}
@ -1742,9 +1768,12 @@ int lv_rename(struct cmd_context *cmd, struct logical_volume *lv,
{
struct volume_group *vg = lv->vg;
struct lv_names lv_names;
DM_LIST_INIT(lvs_changed);
struct lv_list lvl, lvl2;
int r = 0;
/* rename is not allowed on sub LVs */
if (!lv_is_displayable(lv)) {
if (!lv_is_visible(lv)) {
log_error("Cannot rename internal LV \"%s\".", lv->name);
return 0;
}
@ -1775,27 +1804,30 @@ int lv_rename(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
lvl.lv = lv;
dm_list_add(&lvs_changed, &lvl.list);
/* rename active virtual origin too */
if (lv_is_cow(lv) && lv_is_virtual_origin(lvl2.lv = origin_from_cow(lv)))
dm_list_add_h(&lvs_changed, &lvl2.list);
log_verbose("Writing out updated volume group");
if (!vg_write(vg))
return 0;
backup(vg);
if (!suspend_lv(cmd, lv)) {
stack;
if (!suspend_lvs(cmd, &lvs_changed)) {
vg_revert(vg);
return 0;
goto_out;
}
if (!vg_commit(vg)) {
if (!(r = vg_commit(vg)))
stack;
resume_lv(cmd, lv);
return 0;
}
resume_lv(cmd, lv);
return 1;
resume_lvs(cmd, &lvs_changed);
out:
backup(vg);
return r;
}
char *generate_lv_name(struct volume_group *vg, const char *format,
@ -1818,6 +1850,38 @@ char *generate_lv_name(struct volume_group *vg, const char *format,
return buffer;
}
/*
 * Report whether the VG has reached its configured LV limit.
 * Returns 1 (after logging) when the count of visible LVs has reached
 * vg->max_lv; returns 0 when no limit is set or it is not yet reached.
 */
int vg_max_lv_reached(struct volume_group *vg)
{
	/* max_lv == 0 means "unlimited" */
	if (!vg->max_lv || vg_visible_lvs(vg) < vg->max_lv)
		return 0;

	log_verbose("Maximum number of logical volumes (%u) reached "
		    "in volume group %s", vg->max_lv, vg->name);

	return 1;
}
/*
 * Allocate a zero-filled logical_volume structure from the given pool
 * and initialise its embedded lists.  Returns NULL on allocation failure.
 */
struct logical_volume *alloc_lv(struct dm_pool *mem)
{
	struct logical_volume *lv = dm_pool_zalloc(mem, sizeof(*lv));

	if (!lv) {
		log_error("Unable to allocate logical volume structure");
		return NULL;
	}

	/* zalloc already zeroed the struct; make the empty state explicit */
	lv->snapshot = NULL;
	dm_list_init(&lv->snapshot_segs);
	dm_list_init(&lv->segments);
	dm_list_init(&lv->tags);
	dm_list_init(&lv->segs_using_this_lv);

	return lv;
}
/*
* Create a new empty LV.
*/
@ -1825,48 +1889,33 @@ struct logical_volume *lv_create_empty(const char *name,
union lvid *lvid,
uint32_t status,
alloc_policy_t alloc,
int import,
struct volume_group *vg)
{
struct format_instance *fi = vg->fid;
struct cmd_context *cmd = vg->cmd;
struct lv_list *ll = NULL;
struct logical_volume *lv;
char dname[NAME_LEN];
if (vg->max_lv && (vg->max_lv == vg->lv_count)) {
log_error("Maximum number of logical volumes (%u) reached "
"in volume group %s", vg->max_lv, vg->name);
return NULL;
}
if (vg_max_lv_reached(vg))
stack;
if (strstr(name, "%d") &&
!(name = generate_lv_name(vg, name, dname, sizeof(dname)))) {
log_error("Failed to generate unique name for the new "
"logical volume");
return NULL;
}
if (!import)
log_verbose("Creating logical volume %s", name);
if (!(ll = dm_pool_zalloc(cmd->mem, sizeof(*ll))) ||
!(ll->lv = dm_pool_zalloc(cmd->mem, sizeof(*ll->lv)))) {
log_error("lv_list allocation failed");
if (ll)
dm_pool_free(cmd->mem, ll);
} else if (find_lv_in_vg(vg, name)) {
log_error("Unable to create LV %s in Volume Group %s: "
"name already in use.", name, vg->name);
return NULL;
}
lv = ll->lv;
lv->vg = vg;
log_verbose("Creating logical volume %s", name);
if (!(lv->name = dm_pool_strdup(cmd->mem, name))) {
log_error("lv name strdup failed");
if (ll)
dm_pool_free(cmd->mem, ll);
return NULL;
}
if (!(lv = alloc_lv(vg->vgmem)))
return_NULL;
if (!(lv->name = dm_pool_strdup(vg->vgmem, name)))
goto_bad;
lv->status = status;
lv->alloc = alloc;
@ -1875,27 +1924,20 @@ struct logical_volume *lv_create_empty(const char *name,
lv->minor = -1;
lv->size = UINT64_C(0);
lv->le_count = 0;
lv->snapshot = NULL;
dm_list_init(&lv->snapshot_segs);
dm_list_init(&lv->segments);
dm_list_init(&lv->tags);
dm_list_init(&lv->segs_using_this_lv);
if (lvid)
lv->lvid = *lvid;
if (fi->fmt->ops->lv_setup && !fi->fmt->ops->lv_setup(fi, lv)) {
if (ll)
dm_pool_free(cmd->mem, ll);
return_NULL;
}
if (!import)
vg->lv_count++;
dm_list_add(&vg->lvs, &ll->list);
if (!link_lv_to_vg(vg, lv))
goto_bad;
if (fi->fmt->ops->lv_setup && !fi->fmt->ops->lv_setup(fi, lv))
goto_bad;
return lv;
bad:
dm_pool_free(vg->vgmem, lv);
return NULL;
}
static int _add_pvs(struct cmd_context *cmd, struct pv_segment *peg,
@ -1964,6 +2006,55 @@ struct dm_list *build_parallel_areas_from_lv(struct cmd_context *cmd,
return parallel_areas;
}
/*
 * Attach an LV to its VG's list of LVs, allocating the list node from
 * the VG's memory pool.  Returns 1 on success, 0 on allocation failure.
 */
int link_lv_to_vg(struct volume_group *vg, struct logical_volume *lv)
{
	struct lv_list *lvl;

	/* Exceeding max_lv only produces a warning here; linking proceeds. */
	if (vg_max_lv_reached(vg))
		stack;

	lvl = dm_pool_zalloc(vg->vgmem, sizeof(*lvl));
	if (!lvl)
		return_0;

	lvl->lv = lv;
	lv->vg = vg;
	dm_list_add(&vg->lvs, &lvl->list);

	return 1;
}
/*
 * Detach an LV from its VG's LV list.  The list node stays in the VG
 * pool (pool memory is not freed individually).  Returns 0 if the LV
 * cannot be found in its own VG.
 */
int unlink_lv_from_vg(struct logical_volume *lv)
{
	struct lv_list *lvl = find_lv_in_vg(lv->vg, lv->name);

	if (!lvl)
		return_0;

	dm_list_del(&lvl->list);

	return 1;
}
/* Mark an LV visible (a top-level, user-addressable LV); no-op if it already is. */
void lv_set_visible(struct logical_volume *lv)
{
	if (!lv_is_visible(lv)) {
		lv->status |= VISIBLE_LV;
		log_debug("LV %s in VG %s is now visible.", lv->name, lv->vg->name);
	}
}
/* Mark an LV hidden (an internal sub-LV); no-op if it already is. */
void lv_set_hidden(struct logical_volume *lv)
{
	if (lv_is_visible(lv)) {
		lv->status &= ~VISIBLE_LV;
		log_debug("LV %s in VG %s is now hidden.", lv->name, lv->vg->name);
	}
}
int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
const force_t force)
{
@ -2008,28 +2099,14 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return 0;
}
/*
* Check for confirmation prompts in the following cases:
* 1) Clustered VG, and some remote nodes have the LV active
* 2) Non-clustered VG, but LV active locally
*/
if (vg_is_clustered(vg) && !activate_lv_excl(cmd, lv) &&
(force == PROMPT)) {
if (yes_no_prompt("Logical volume \"%s\" is active on other "
"cluster nodes. Really remove? [y/n]: ",
lv->name) == 'n') {
log_print("Logical volume \"%s\" not removed",
lv->name);
return 0;
}
} else if (info.exists && (force == PROMPT)) {
if (yes_no_prompt("Do you really want to remove active "
"logical volume \"%s\"? [y/n]: ",
lv->name) == 'n') {
log_print("Logical volume \"%s\" not removed",
lv->name);
return 0;
}
if (lv_is_active(lv) && (force == PROMPT) &&
lv_is_visible(lv) &&
yes_no_prompt("Do you really want to remove active "
"%slogical volume %s? [y/n]: ",
vg_is_clustered(vg) ? "clustered " : "",
lv->name) == 'n') {
log_print("Logical volume %s not removed", lv->name);
return 0;
}
}
@ -2057,14 +2134,11 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
}
/* store it on disks */
if (!vg_write(vg))
return 0;
if (!vg_write(vg) || !vg_commit(vg))
return_0;
backup(vg);
if (!vg_commit(vg))
return 0;
/* If no snapshots left, reload without -real. */
if (origin && !lv_is_origin(origin)) {
if (!suspend_lv(cmd, origin))
@ -2073,7 +2147,9 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
log_error("Failed to resume %s.", origin->name);
}
log_print("Logical volume \"%s\" successfully removed", lv->name);
if (lv_is_visible(lv))
log_print("Logical volume \"%s\" successfully removed", lv->name);
return 1;
}
@ -2404,7 +2480,7 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
}
if (!(layer_lv = lv_create_empty(name, NULL, LVM_READ | LVM_WRITE,
ALLOC_INHERIT, 0, lv_where->vg))) {
ALLOC_INHERIT, lv_where->vg))) {
log_error("Creation of layer LV failed");
return NULL;
}
@ -2718,3 +2794,385 @@ int set_lv(struct cmd_context *cmd, struct logical_volume *lv,
return 1;
}
/*
 * Create and commit a "<lv_name>_vorigin" LV backed by the zero target,
 * used as the virtual origin of a snapshot created with --virtualsize.
 * Returns the new LV, or NULL on failure.
 */
static struct logical_volume *_create_virtual_origin(struct cmd_context *cmd,
						     struct volume_group *vg,
						     const char *lv_name,
						     uint32_t permission,
						     uint64_t voriginextents)
{
	const struct segment_type *segtype;
	size_t len;
	char *vorigin_name;
	struct logical_volume *lv;

	if (!(segtype = get_segtype_from_string(cmd, "zero"))) {
		log_error("Zero segment type for virtual origin not found");
		return NULL;
	}

	/* "_vorigin" suffix plus slack for the name buffer */
	len = strlen(lv_name) + 32;
	/* NOTE(review): alloca() does not report failure by returning NULL,
	 * so the !vorigin_name test is effectively dead — confirm upstream. */
	if (!(vorigin_name = alloca(len)) ||
	    dm_snprintf(vorigin_name, len, "%s_vorigin", lv_name) < 0) {
		log_error("Virtual origin name allocation failed.");
		return NULL;
	}

	if (!(lv = lv_create_empty(vorigin_name, NULL, permission,
				   ALLOC_INHERIT, vg)))
		return_NULL;

	/* A single 1-stripe segment covering the requested virtual size */
	if (!lv_extend(lv, segtype, 1, 0, 1, voriginextents, NULL, 0u, 0u,
		       NULL, ALLOC_INHERIT))
		return_NULL;

	/* store vg on disk(s) */
	if (!vg_write(vg) || !vg_commit(vg))
		return_NULL;

	backup(vg);

	return lv;
}
/*
 * Create one new LV (plain, striped, mirrored, or a snapshot COW area)
 * according to the parameters in lp, commit the updated VG metadata,
 * activate the new LV and optionally zero its start.
 *
 * Returns 1 on success.  On failure after the LV has been added to the
 * metadata, tries to deactivate and remove it again before returning 0.
 */
int lv_create_single(struct volume_group *vg,
		     struct lvcreate_params *lp)
{
	struct cmd_context *cmd = vg->cmd;
	uint32_t size_rest;
	uint32_t status = 0;
	struct logical_volume *lv, *org = NULL;
	int origin_active = 0;
	char lv_name_buf[128];
	const char *lv_name;
	struct lvinfo info;

	/* --- up-front validation of the request against VG capabilities --- */

	if (lp->lv_name && find_lv_in_vg(vg, lp->lv_name)) {
		log_error("Logical volume \"%s\" already exists in "
			  "volume group \"%s\"", lp->lv_name, lp->vg_name);
		return 0;
	}

	if (vg_max_lv_reached(vg)) {
		log_error("Maximum number of logical volumes (%u) reached "
			  "in volume group %s", vg->max_lv, vg->name);
		return 0;
	}

	if (lp->mirrors > 1 && !(vg->fid->fmt->features & FMT_SEGMENTS)) {
		log_error("Metadata does not support mirroring.");
		return 0;
	}

	if (lp->read_ahead != DM_READ_AHEAD_AUTO &&
	    lp->read_ahead != DM_READ_AHEAD_NONE &&
	    (vg->fid->fmt->features & FMT_RESTRICTED_READAHEAD) &&
	    (lp->read_ahead < 2 || lp->read_ahead > 120)) {
		log_error("Metadata only supports readahead values between 2 and 120.");
		return 0;
	}

	if (lp->stripe_size > vg->extent_size) {
		log_error("Reducing requested stripe size %s to maximum, "
			  "physical extent size %s",
			  display_size(cmd, (uint64_t) lp->stripe_size),
			  display_size(cmd, (uint64_t) vg->extent_size));
		lp->stripe_size = vg->extent_size;
	}

	/* Need to check the vg's format to verify this - the cmd format isn't setup properly yet */
	if (lp->stripes > 1 &&
	    !(vg->fid->fmt->features & FMT_UNLIMITED_STRIPESIZE) &&
	    (lp->stripe_size > STRIPE_SIZE_MAX)) {
		log_error("Stripe size may not exceed %s",
			  display_size(cmd, (uint64_t) STRIPE_SIZE_MAX));
		return 0;
	}

	/* Round the extent count up to a whole number of stripes */
	if ((size_rest = lp->extents % lp->stripes)) {
		log_print("Rounding size (%d extents) up to stripe boundary "
			  "size (%d extents)", lp->extents,
			  lp->extents - size_rest + lp->stripes);
		lp->extents = lp->extents - size_rest + lp->stripes;
	}

	if (lp->zero && !activation()) {
		log_error("Can't wipe start of new LV without using "
			  "device-mapper kernel driver");
		return 0;
	}

	status |= lp->permission | VISIBLE_LV;

	/* --- snapshot-specific validation and origin lookup --- */
	if (lp->snapshot) {
		if (!activation()) {
			log_error("Can't create snapshot without using "
				  "device-mapper kernel driver");
			return 0;
		}
		/* FIXME Allow exclusive activation. */
		if (vg_is_clustered(vg)) {
			log_error("Clustered snapshots are not yet supported.");
			return 0;
		}

		/* Must zero cow */
		status |= LVM_WRITE;

		if (lp->voriginsize)
			origin_active = 1;
		else {
			if (!(org = find_lv(vg, lp->origin))) {
				log_error("Couldn't find origin volume '%s'.",
					  lp->origin);
				return 0;
			}
			if (lv_is_virtual_origin(org)) {
				log_error("Can't share virtual origins. "
					  "Use --virtualsize.");
				return 0;
			}
			if (lv_is_cow(org)) {
				log_error("Snapshots of snapshots are not "
					  "supported yet.");
				return 0;
			}
			if (org->status & LOCKED) {
				log_error("Snapshots of locked devices are not "
					  "supported yet");
				return 0;
			}
			if ((org->status & MIRROR_IMAGE) ||
			    (org->status & MIRROR_LOG)) {
				log_error("Snapshots of mirror %ss "
					  "are not supported",
					  (org->status & MIRROR_LOG) ?
					  "log" : "image");
				return 0;
			}

			if (!lv_info(cmd, org, &info, 0, 0)) {
				log_error("Check for existence of snapshot "
					  "origin '%s' failed.", org->name);
				return 0;
			}
			origin_active = info.exists;
		}
	}

	if (!lp->extents) {
		log_error("Unable to create new logical volume with no extents");
		return 0;
	}

	/* Virtual (zero-backed) segments consume no real extents */
	if (!seg_is_virtual(lp) &&
	    vg->free_count < lp->extents) {
		log_error("Insufficient free extents (%u) in volume group %s: "
			  "%u required", vg->free_count, vg->name, lp->extents);
		return 0;
	}

	if (lp->stripes > dm_list_size(lp->pvh) && lp->alloc != ALLOC_ANYWHERE) {
		log_error("Number of stripes (%u) must not exceed "
			  "number of physical volumes (%d)", lp->stripes,
			  dm_list_size(lp->pvh));
		return 0;
	}

	if (lp->mirrors > 1 && !activation()) {
		log_error("Can't create mirror without using "
			  "device-mapper kernel driver.");
		return 0;
	}

	/* The snapshot segment gets created later */
	if (lp->snapshot &&
	    !(lp->segtype = get_segtype_from_string(cmd, "striped")))
		return_0;

	/* Archive the metadata before any modification */
	if (!archive(vg))
		return 0;

	if (lp->lv_name)
		lv_name = lp->lv_name;
	else {
		/* No name supplied: generate the next free lvolN name */
		if (!generate_lv_name(vg, "lvol%d", lv_name_buf, sizeof(lv_name_buf))) {
			log_error("Failed to generate LV name.");
			return 0;
		}
		lv_name = &lv_name_buf[0];
	}

	if (lp->tag) {
		if (!(vg->fid->fmt->features & FMT_TAGS)) {
			log_error("Volume group %s does not support tags",
				  vg->name);
			return 0;
		}
	}

	if (lp->mirrors > 1) {
		init_mirror_in_sync(lp->nosync);

		if (lp->nosync) {
			log_warn("WARNING: New mirror won't be synchronised. "
				 "Don't read what you didn't write!");
			status |= MIRROR_NOTSYNCED;
		}
	}

	/* --- build the LV in metadata --- */

	if (!(lv = lv_create_empty(lv_name ? lv_name : "lvol%d", NULL,
				   status, lp->alloc, vg)))
		return_0;

	if (lp->read_ahead) {
		log_verbose("Setting read ahead sectors");
		lv->read_ahead = lp->read_ahead;
	}

	if (lp->minor >= 0) {
		lv->major = lp->major;
		lv->minor = lp->minor;
		lv->status |= FIXED_MINOR;
		log_verbose("Setting device number to (%d, %d)", lv->major,
			    lv->minor);
	}

	if (lp->tag && !str_list_add(cmd->mem, &lv->tags, lp->tag)) {
		log_error("Failed to add tag %s to %s/%s",
			  lp->tag, lv->vg->name, lv->name);
		return 0;
	}

	if (!lv_extend(lv, lp->segtype, lp->stripes, lp->stripe_size,
		       1, lp->extents, NULL, 0u, 0u, lp->pvh, lp->alloc))
		return_0;

	if (lp->mirrors > 1) {
		if (!lv_add_mirrors(cmd, lv, lp->mirrors - 1, lp->stripes,
				    adjusted_mirror_region_size(
						vg->extent_size,
						lv->le_count,
						lp->region_size),
				    lp->corelog ? 0U : 1U, lp->pvh, lp->alloc,
				    MIRROR_BY_LV |
				    (lp->nosync ? MIRROR_SKIP_INIT_SYNC : 0))) {
			stack;
			goto revert_new_lv;
		}
	}

	/* store vg on disk(s) */
	if (!vg_write(vg) || !vg_commit(vg))
		return_0;

	backup(vg);

	/* --- activate, wipe, and (for snapshots) wire up the origin --- */

	if (lp->snapshot) {
		if (!activate_lv_excl(cmd, lv)) {
			log_error("Aborting. Failed to activate snapshot "
				  "exception store.");
			goto revert_new_lv;
		}
	} else if (!activate_lv(cmd, lv)) {
		if (lp->zero) {
			log_error("Aborting. Failed to activate new LV to wipe "
				  "the start of it.");
			goto deactivate_and_revert_new_lv;
		}
		log_error("Failed to activate new LV.");
		return 0;
	}

	if (!lp->zero && !lp->snapshot)
		log_error("WARNING: \"%s\" not zeroed", lv->name);
	else if (!set_lv(cmd, lv, UINT64_C(0), 0)) {
		log_error("Aborting. Failed to wipe %s.",
			  lp->snapshot ? "snapshot exception store" :
					 "start of new LV");
		goto deactivate_and_revert_new_lv;
	}

	if (lp->snapshot) {
		/* Reset permission after zeroing */
		if (!(lp->permission & LVM_WRITE))
			lv->status &= ~LVM_WRITE;

		/* COW area must be deactivated if origin is not active */
		if (!origin_active && !deactivate_lv(cmd, lv)) {
			log_error("Aborting. Couldn't deactivate snapshot "
				  "COW area. Manual intervention required.");
			return 0;
		}

		/* A virtual origin must be activated explicitly. */
		if (lp->voriginsize &&
		    (!(org = _create_virtual_origin(cmd, vg, lv->name,
						    lp->permission,
						    lp->voriginextents)) ||
		     !activate_lv(cmd, org))) {
			log_error("Couldn't create virtual origin for LV %s",
				  lv->name);
			if (org && !lv_remove(org))
				stack;
			goto deactivate_and_revert_new_lv;
		}

		/* cow LV remains active and becomes snapshot LV */

		if (!vg_add_snapshot(org, lv, NULL,
				     org->le_count, lp->chunk_size)) {
			log_error("Couldn't create snapshot.");
			goto deactivate_and_revert_new_lv;
		}

		/* store vg on disk(s) */
		if (!vg_write(vg))
			return_0;

		/* Suspend origin around the commit so the snapshot appears atomically */
		if (!suspend_lv(cmd, org)) {
			log_error("Failed to suspend origin %s", org->name);
			vg_revert(vg);
			return 0;
		}

		if (!vg_commit(vg))
			return_0;

		if (!resume_lv(cmd, org)) {
			log_error("Problem reactivating origin %s", org->name);
			return 0;
		}
	}
	/* FIXME out of sequence */
	backup(vg);

	log_print("Logical volume \"%s\" created", lv->name);

	/*
	 * FIXME: as a sanity check we could try reading the
	 * last block of the device ?
	 */

	return 1;

deactivate_and_revert_new_lv:
	if (!deactivate_lv(cmd, lv)) {
		log_error("Unable to deactivate failed new LV. "
			  "Manual intervention required.");
		return 0;
	}

revert_new_lv:
	/* FIXME Better to revert to backup of metadata? */
	if (!lv_remove(lv) || !vg_write(vg) || !vg_commit(vg))
		log_error("Manual intervention may be required to remove "
			  "abandoned LV(s) before retrying.");
	else
		backup(vg);

	return 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: libdm-common.c,v 1.3 2009/02/18 12:16:13 haad Exp $ */
/* $NetBSD: libdm-common.c,v 1.4 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
@ -27,6 +27,17 @@
#include <sys/param.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <dirent.h>
#ifdef UDEV_SYNC_SUPPORT
# include <sys/types.h>
# include <sys/ipc.h>
# include <sys/sem.h>
#ifdef HAVE_UDEV_QUEUE_GET_UDEV_IS_ACTIVE
# define LIBUDEV_I_KNOW_THE_API_IS_SUBJECT_TO_CHANGE
# include <libudev.h>
#endif
#endif
#ifdef linux
# include <linux/fs.h>
@ -46,14 +57,21 @@ static char _dm_dir[PATH_MAX] = DEV_DIR DM_DIR;
static int _verbose = 0;
#ifdef UDEV_SYNC_SUPPORT
static int _udev_running = -1;
static int _sync_with_udev = 1;
#endif
/*
* Library users can provide their own logging
* function.
*/
static void _default_log(int level, const char *file __attribute((unused)),
int line __attribute((unused)), const char *f, ...)
static void _default_log_line(int level,
const char *file __attribute((unused)),
int line __attribute((unused)), int dm_errno,
const char *f, va_list ap)
{
va_list ap;
int use_stderr = level & _LOG_STDERR;
level &= ~_LOG_STDERR;
@ -61,22 +79,41 @@ static void _default_log(int level, const char *file __attribute((unused)),
if (level > _LOG_WARN && !_verbose)
return;
va_start(ap, f);
if (level < _LOG_WARN)
vfprintf(stderr, f, ap);
else
vfprintf(use_stderr ? stderr : stdout, f, ap);
va_end(ap);
if (level < _LOG_WARN)
fprintf(stderr, "\n");
else
fprintf(use_stderr ? stderr : stdout, "\n");
}
/*
 * Default errno-aware logging callback: gather the variadic arguments
 * and forward everything to the common line formatter.
 */
static void _default_log_with_errno(int level,
				    const char *file __attribute((unused)),
				    int line __attribute((unused)), int dm_errno,
				    const char *f, ...)
{
	va_list args;

	va_start(args, f);
	_default_log_line(level, file, line, dm_errno, f, args);
	va_end(args);
}
/*
 * Default legacy logging callback (no errno): forwards to the common
 * line formatter with dm_errno fixed to 0.
 */
static void _default_log(int level, const char *file,
			 int line, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	_default_log_line(level, file, line, 0, f, args);
	va_end(args);
}
dm_log_fn dm_log = _default_log;
dm_log_with_errno_fn dm_log_with_errno = _default_log_with_errno;
void dm_log_init(dm_log_fn fn)
{
@ -84,6 +121,23 @@ void dm_log_init(dm_log_fn fn)
dm_log = fn;
else
dm_log = _default_log;
dm_log_with_errno = _default_log_with_errno;
}
/* Return 1 if the caller installed a custom log callback, 0 if the
 * built-in default is still in place. */
int dm_log_is_non_default(void)
{
	return dm_log != _default_log;
}
/*
 * Install an errno-aware log callback (NULL restores the default).
 * The legacy dm_log pointer is reset to its default so only the
 * errno-aware path is active.
 */
void dm_log_with_errno_init(dm_log_with_errno_fn fn)
{
	dm_log_with_errno = fn ? fn : _default_log_with_errno;
	dm_log = _default_log;
}
void dm_log_init_verbose(int level)
@ -126,19 +180,70 @@ struct dm_task *dm_task_create(int type)
dmt->type = type;
dmt->minor = -1;
dmt->major = -1;
dmt->allow_default_major_fallback = 1;
dmt->uid = DM_DEVICE_UID;
dmt->gid = DM_DEVICE_GID;
dmt->mode = DM_DEVICE_MODE;
dmt->no_open_count = 0;
dmt->read_ahead = DM_READ_AHEAD_AUTO;
dmt->read_ahead_flags = 0;
dmt->event_nr = 0;
dmt->cookie_set = 0;
dmt->query_inactive_table = 0;
return dmt;
}
/*
* Find the name associated with a given device number by scanning _dm_dir.
*/
/*
 * Find the name associated with a given device number by scanning _dm_dir.
 * Returns a dm_strdup'd name the caller must free, or NULL if no node
 * with a matching device number exists (or on error).
 */
static char *_find_dm_name_of_device(dev_t st_rdev)
{
	DIR *dir;
	struct dirent *entry;
	struct stat sb;
	char path[PATH_MAX];
	char *found = NULL;

	if (!(dir = opendir(_dm_dir))) {
		log_sys_error("opendir", _dm_dir);
		return NULL;
	}

	/* Walk every node under _dm_dir until one matches st_rdev. */
	while ((entry = readdir(dir))) {
		const char *name = entry->d_name;

		if (!strcmp(name, ".") || !strcmp(name, ".."))
			continue;

		if (dm_snprintf(path, sizeof(path), "%s/%s", _dm_dir,
				name) == -1) {
			log_error("Couldn't create path for %s", name);
			continue;
		}

		if (stat(path, &sb) || sb.st_rdev != st_rdev)
			continue;

		/* Match: hand back a heap copy of the node name. */
		if (!(found = dm_strdup(name)))
			log_error("dm_task_set_name: strdup(%s) failed",
				  name);
		break;
	}

	if (closedir(dir))
		log_sys_error("closedir", _dm_dir);

	return found;
}
int dm_task_set_name(struct dm_task *dmt, const char *name)
{
char *pos;
char *new_name = NULL;
char path[PATH_MAX];
struct stat st1, st2;
@ -147,8 +252,8 @@ int dm_task_set_name(struct dm_task *dmt, const char *name)
dmt->dev_name = NULL;
}
/* If path was supplied, remove it if it points to the same device
* as its last component.
/*
* Path supplied for existing device?
*/
if ((pos = strrchr(name, '/'))) {
if (dmt->type == DM_DEVICE_CREATE) {
@ -156,23 +261,42 @@ int dm_task_set_name(struct dm_task *dmt, const char *name)
return 0;
}
snprintf(path, sizeof(path), "%s/%s", _dm_dir, pos + 1);
if (stat(name, &st1) || stat(path, &st2) ||
!(st1.st_dev == st2.st_dev)) {
if (stat(name, &st1)) {
log_error("Device %s not found", name);
return 0;
}
name = pos + 1;
/*
* If supplied path points to same device as last component
* under /dev/mapper, use that name directly. Otherwise call
* _find_dm_name_of_device() to scan _dm_dir for a match.
*/
if (dm_snprintf(path, sizeof(path), "%s/%s", _dm_dir,
pos + 1) == -1) {
log_error("Couldn't create path for %s", pos + 1);
return 0;
}
if (!stat(path, &st2) && (st1.st_rdev == st2.st_rdev))
name = pos + 1;
else if ((new_name = _find_dm_name_of_device(st1.st_rdev)))
name = new_name;
else {
log_error("Device %s not found", name);
return 0;
}
}
if (strlen(name) >= DM_NAME_LEN) {
log_error("Name \"%s\" too long", name);
if (new_name)
dm_free(new_name);
return 0;
}
if (!(dmt->dev_name = dm_strdup(name))) {
if (new_name)
dmt->dev_name = new_name;
else if (!(dmt->dev_name = dm_strdup(name))) {
log_error("dm_task_set_name: strdup(%s) failed", name);
return 0;
}
@ -198,6 +322,7 @@ int dm_task_set_uuid(struct dm_task *dmt, const char *uuid)
/*
 * Select the target device by explicit major number.  Explicitly setting
 * a major also disables the fallback to the default device-mapper major
 * (see allow_default_major_fallback).  Always returns 1.
 */
int dm_task_set_major(struct dm_task *dmt, int major)
{
	dmt->major = major;
	dmt->allow_default_major_fallback = 0;
	return 1;
}
@ -209,6 +334,16 @@ int dm_task_set_minor(struct dm_task *dmt, int minor)
return 1;
}
/*
 * Set both device numbers in one call.  allow_default_major_fallback
 * controls whether a lookup with this major may still fall back to the
 * default device-mapper major.  Always returns 1.
 */
int dm_task_set_major_minor(struct dm_task *dmt, int major, int minor,
			    int allow_default_major_fallback)
{
	dmt->major = major;
	dmt->minor = minor;
	dmt->allow_default_major_fallback = allow_default_major_fallback;

	return 1;
}
int dm_task_set_uid(struct dm_task *dmt, uid_t uid)
{
dmt->uid = uid;
@ -248,9 +383,9 @@ int dm_task_add_target(struct dm_task *dmt, uint64_t start, uint64_t size,
return 1;
}
#ifdef HAVE_SELINUX
int dm_set_selinux_context(const char *path, mode_t mode)
{
#ifdef HAVE_SELINUX
security_context_t scontext;
if (is_selinux_enabled() <= 0)
@ -271,12 +406,12 @@ int dm_set_selinux_context(const char *path, mode_t mode)
}
freecon(scontext);
#endif
return 1;
}
#endif
static int _add_dev_node(const char *dev_name, uint32_t major, uint32_t minor,
uid_t uid, gid_t gid, mode_t mode)
uid_t uid, gid_t gid, mode_t mode, int check_udev)
{
char path[PATH_MAX];
struct stat info;
@ -340,10 +475,13 @@ static int _add_dev_node(const char *dev_name, uint32_t major, uint32_t minor,
dev_name);
return 0;
}
}
} else if (dm_udev_get_sync_support() && check_udev)
log_warn("%s not set up by udev: Falling back to direct "
"node creation.", path);
old_mask = umask(0);
if (mknod(path, S_IFBLK | mode, dev) < 0) {
umask(old_mask);
log_error("Unable to make device node for '%s'", dev_name);
return 0;
}
@ -356,15 +494,56 @@ static int _add_dev_node(const char *dev_name, uint32_t major, uint32_t minor,
log_debug("Created %s", path);
#ifdef HAVE_SELINUX
if (!dm_set_selinux_context(path, S_IFBLK))
return 0;
#endif
return 1;
}
static int _rename_dev_node(const char *old_name, const char *new_name)
/*
 * Remove the device node for dev_name from the dev directory.
 * On NetBSD the matching raw ("r"-prefixed) node is removed first.
 * A missing node counts as success.  When udev synchronisation is
 * active and check_udev is set, a lingering node is reported before
 * falling back to direct removal.  Returns 1 on success, 0 on failure.
 */
static int _rm_dev_node(const char *dev_name, int check_udev)
{
	char path[PATH_MAX];
	struct stat info;

#ifdef __NetBSD__
	char rpath[PATH_MAX];
	char raw_devname[DM_NAME_LEN+1]; /* r + other device name */

	snprintf(raw_devname,sizeof(raw_devname),"r%s",dev_name);
	_build_dev_path(rpath, sizeof(rpath), raw_devname);

	/* Raw node already gone: nothing to do for it. */
	if (stat(rpath, &info) < 0)
		return 1;

	if (unlink(rpath) < 0) {
		log_error("Unable to unlink device node for '%s'", raw_devname);
		return 0;
	}

	log_debug("Removed %s", rpath);
#endif

	_build_dev_path(path, sizeof(path), dev_name);

	/* Node already gone: treat as success. */
	if (stat(path, &info) < 0)
		return 1;
	else if (dm_udev_get_sync_support() && check_udev)
		log_warn("Node %s was not removed by udev. "
			 "Falling back to direct node removal.", path);

	if (unlink(path) < 0) {
		log_error("Unable to unlink device node for '%s'", dev_name);
		return 0;
	}

	log_debug("Removed %s", path);

	return 1;
}
static int _rename_dev_node(const char *old_name, const char *new_name,
int check_udev)
{
char oldpath[PATH_MAX];
char newpath[PATH_MAX];
@ -415,6 +594,19 @@ static int _rename_dev_node(const char *old_name, const char *new_name)
"is already present", newpath);
return 0;
}
else if (dm_udev_get_sync_support() && check_udev) {
if (stat(oldpath, &info) < 0 &&
errno == ENOENT)
/* assume udev already deleted this */
return 1;
else {
log_warn("The node %s should have been renamed to %s "
"by udev but old node is still present. "
"Falling back to direct old node removal.",
oldpath, newpath);
return _rm_dev_node(old_name, 0);
}
}
if (unlink(newpath) < 0) {
if (errno == EPERM) {
@ -426,6 +618,11 @@ static int _rename_dev_node(const char *old_name, const char *new_name)
return 0;
}
}
else if (dm_udev_get_sync_support() && check_udev)
log_warn("The node %s should have been renamed to %s "
"by udev but new node is not present. "
"Falling back to direct node rename.",
oldpath, newpath);
if (rename(oldpath, newpath) < 0) {
log_error("Unable to rename device node from '%s' to '%s'",
@ -438,45 +635,6 @@ static int _rename_dev_node(const char *old_name, const char *new_name)
return 1;
}
static int _rm_dev_node(const char *dev_name)
{
char path[PATH_MAX];
struct stat info;
#ifdef __NetBSD__
char rpath[PATH_MAX];
char raw_devname[DM_NAME_LEN+1]; /* r + other device name */
snprintf(raw_devname,sizeof(raw_devname),"r%s",dev_name);
_build_dev_path(rpath, sizeof(rpath), raw_devname);
if (stat(rpath, &info) < 0)
return 1;
if (unlink(rpath) < 0) {
log_error("Unable to unlink device node for '%s'", raw_devname);
return 0;
}
log_debug("Removed %s", rpath);
#endif
_build_dev_path(path, sizeof(path), dev_name);
if (stat(path, &info) < 0)
return 1;
if (unlink(path) < 0) {
log_error("Unable to unlink device node for '%s'", dev_name);
return 0;
}
log_debug("Removed %s", path);
return 1;
}
#ifdef linux
static int _open_dev_node(const char *dev_name)
{
@ -599,15 +757,16 @@ typedef enum {
static int _do_node_op(node_op_t type, const char *dev_name, uint32_t major,
uint32_t minor, uid_t uid, gid_t gid, mode_t mode,
const char *old_name, uint32_t read_ahead,
uint32_t read_ahead_flags)
uint32_t read_ahead_flags, int check_udev)
{
switch (type) {
case NODE_ADD:
return _add_dev_node(dev_name, major, minor, uid, gid, mode);
return _add_dev_node(dev_name, major, minor, uid, gid,
mode, check_udev);
case NODE_DEL:
return _rm_dev_node(dev_name);
return _rm_dev_node(dev_name, check_udev);
case NODE_RENAME:
return _rename_dev_node(old_name, dev_name);
return _rename_dev_node(old_name, dev_name, check_udev);
case NODE_READ_AHEAD:
return _set_dev_node_read_ahead(dev_name, read_ahead,
read_ahead_flags);
@ -630,6 +789,7 @@ struct node_op_parms {
uint32_t read_ahead;
uint32_t read_ahead_flags;
char *old_name;
int check_udev;
char names[0];
};
@ -643,7 +803,7 @@ static void _store_str(char **pos, char **ptr, const char *str)
static int _stack_node_op(node_op_t type, const char *dev_name, uint32_t major,
uint32_t minor, uid_t uid, gid_t gid, mode_t mode,
const char *old_name, uint32_t read_ahead,
uint32_t read_ahead_flags)
uint32_t read_ahead_flags, int check_udev)
{
struct node_op_parms *nop;
struct dm_list *noph, *nopht;
@ -677,6 +837,7 @@ static int _stack_node_op(node_op_t type, const char *dev_name, uint32_t major,
nop->mode = mode;
nop->read_ahead = read_ahead;
nop->read_ahead_flags = read_ahead_flags;
nop->check_udev = check_udev;
_store_str(&pos, &nop->dev_name, dev_name);
_store_str(&pos, &nop->old_name, old_name);
@ -695,35 +856,37 @@ static void _pop_node_ops(void)
nop = dm_list_item(noph, struct node_op_parms);
_do_node_op(nop->type, nop->dev_name, nop->major, nop->minor,
nop->uid, nop->gid, nop->mode, nop->old_name,
nop->read_ahead, nop->read_ahead_flags);
nop->read_ahead, nop->read_ahead_flags,
nop->check_udev);
dm_list_del(&nop->list);
dm_free(nop);
}
}
int add_dev_node(const char *dev_name, uint32_t major, uint32_t minor,
uid_t uid, gid_t gid, mode_t mode)
uid_t uid, gid_t gid, mode_t mode, int check_udev)
{
log_debug("%s: Stacking NODE_ADD (%" PRIu32 ",%" PRIu32 ") %u:%u 0%o",
dev_name, major, minor, uid, gid, mode);
return _stack_node_op(NODE_ADD, dev_name, major, minor, uid, gid, mode,
"", 0, 0);
return _stack_node_op(NODE_ADD, dev_name, major, minor, uid,
gid, mode, "", 0, 0, check_udev);
}
int rename_dev_node(const char *old_name, const char *new_name)
int rename_dev_node(const char *old_name, const char *new_name, int check_udev)
{
log_debug("%s: Stacking NODE_RENAME to %s", old_name, new_name);
return _stack_node_op(NODE_RENAME, new_name, 0, 0, 0, 0, 0, old_name,
0, 0);
return _stack_node_op(NODE_RENAME, new_name, 0, 0, 0,
0, 0, old_name, 0, 0, check_udev);
}
int rm_dev_node(const char *dev_name)
int rm_dev_node(const char *dev_name, int check_udev)
{
log_debug("%s: Stacking NODE_DEL (replaces other stacked ops)", dev_name);
return _stack_node_op(NODE_DEL, dev_name, 0, 0, 0, 0, 0, "", 0, 0);
return _stack_node_op(NODE_DEL, dev_name, 0, 0, 0,
0, 0, "", 0, 0, check_udev);
}
int set_dev_node_read_ahead(const char *dev_name, uint32_t read_ahead,
@ -735,8 +898,8 @@ int set_dev_node_read_ahead(const char *dev_name, uint32_t read_ahead,
log_debug("%s: Stacking NODE_READ_AHEAD %" PRIu32 " (flags=%" PRIu32
")", dev_name, read_ahead, read_ahead_flags);
return _stack_node_op(NODE_READ_AHEAD, dev_name, 0, 0, 0, 0, 0, "",
read_ahead, read_ahead_flags);
return _stack_node_op(NODE_READ_AHEAD, dev_name, 0, 0, 0, 0,
0, "", read_ahead, read_ahead_flags, 0);
}
void update_devs(void)
@ -813,3 +976,367 @@ out:
return r;
}
#ifndef UDEV_SYNC_SUPPORT
/* No-op stub: built without UDEV_SYNC_SUPPORT, so there is no udev
 * synchronisation state to toggle. */
void dm_udev_set_sync_support(int sync_with_udev)
{
}
/* Stub: udev synchronisation is never available in this build. */
int dm_udev_get_sync_support(void)
{
	return 0;
}
/*
 * Stub cookie setup for builds without udev synchronisation.
 * Still records the udev flags in event_nr when the kernel supports
 * cookies, but always hands back a zero (inactive) cookie.
 */
int dm_task_set_cookie(struct dm_task *dmt, uint32_t *cookie, uint16_t flags)
{
	if (dm_cookie_supported())
		dmt->event_nr = flags << DM_UDEV_FLAGS_SHIFT;
	*cookie = 0;

	return 1;
}
/* Stub: nothing to notify without udev synchronisation support. */
int dm_udev_complete(uint32_t cookie)
{
	return 1;
}
/* No-op without udev sync support; always reports success. */
int dm_udev_wait(uint32_t cookie)
{
	return 1;
}
#else /* UDEV_SYNC_SUPPORT */
/*
 * Ask libudev whether the udev daemon is currently active.
 *
 * Returns 1 if udev is running, 0 otherwise.  Whenever the state cannot
 * be determined (libudev not compiled in, or any query step fails) udev
 * is conservatively assumed NOT to be running.
 */
static int _check_udev_is_running(void)
{

# ifndef HAVE_UDEV_QUEUE_GET_UDEV_IS_ACTIVE
	/* Built without libudev - no way to query udev state at all. */
	log_debug("Could not get udev state because libudev library "
		  "was not found and it was not compiled in. "
		  "Assuming udev is not running.");
	return 0;

# else	/* HAVE_UDEV_QUEUE_GET_UDEV_IS_ACTIVE */
	struct udev *udev;
	struct udev_queue *udev_queue;
	int r;

	/* NOTE(review): goto_bad is a libdm macro - presumably logs the
	 * failing call before jumping to the bad: label; confirm. */
	if (!(udev = udev_new()))
		goto_bad;

	if (!(udev_queue = udev_queue_new(udev))) {
		udev_unref(udev);
		goto_bad;
	}

	if (!(r = udev_queue_get_udev_is_active(udev_queue)))
		log_debug("Udev is not running. "
			  "Not using udev synchronisation code.");

	/* Release the queue before its parent udev context. */
	udev_queue_unref(udev_queue);
	udev_unref(udev);

	return r;

bad:
	log_error("Could not get udev state. Assuming udev is not running.");
	return 0;

# endif	/* HAVE_UDEV_QUEUE_GET_UDEV_IS_ACTIVE */
}
/*
 * Enable or disable udev synchronisation for this library instance.
 * On first use also probes (and caches in _udev_running) whether the
 * udev daemon is active.
 */
void dm_udev_set_sync_support(int sync_with_udev)
{
	/* Negative _udev_running means "not yet checked". */
	if (_udev_running < 0)
		_udev_running = _check_udev_is_running();

	_sync_with_udev = sync_with_udev;
}
/*
 * Report whether udev synchronisation will actually be used: requires
 * cookie support, a running udev daemon, and sync enabled by the caller.
 */
int dm_udev_get_sync_support(void)
{
	/* Lazily probe udev state on first use. */
	if (_udev_running < 0)
		_udev_running = _check_udev_is_running();

	return dm_cookie_supported() && _udev_running && _sync_with_udev;
}
/*
 * Look up the System V semaphore backing a notification cookie.
 *
 * A valid cookie carries DM_COOKIE_MAGIC in its top 16 bits; the whole
 * cookie value is used as the semget() key.
 *
 * On success stores the semaphore id in *semid and returns 1.
 * Returns 0 (with an explanatory log message) on a malformed cookie or
 * a semget() failure.
 *
 * Fix: corrected typo "notificaton" in the EACCES error message.
 */
static int _get_cookie_sem(uint32_t cookie, int *semid)
{
	if (cookie >> 16 != DM_COOKIE_MAGIC) {
		log_error("Could not continue to access notification "
			  "semaphore identified by cookie value %"
			  PRIu32 " (0x%x). Incorrect cookie prefix.",
			  cookie, cookie);
		return 0;
	}

	if ((*semid = semget((key_t) cookie, 1, 0)) >= 0)
		return 1;

	switch (errno) {
		case ENOENT:
			log_error("Could not find notification "
				  "semaphore identified by cookie "
				  "value %" PRIu32 " (0x%x)",
				  cookie, cookie);
			break;
		case EACCES:
			log_error("No permission to access "
				  "notification semaphore identified "
				  "by cookie value %" PRIu32 " (0x%x)",
				  cookie, cookie);
			break;
		default:
			log_error("Failed to access notification "
				  "semaphore identified by cookie "
				  "value %" PRIu32 " (0x%x): %s",
				  cookie, cookie, strerror(errno));
			break;
	}

	return 0;
}
/*
 * Add one to the cookie's notification semaphore counter.
 * Returns 1 on success, 0 if semop() fails.
 */
static int _udev_notify_sem_inc(uint32_t cookie, int semid)
{
	struct sembuf op = {0, 1, 0};
	int rc = semop(semid, &op, 1);

	if (rc < 0) {
		log_error("semid %d: semop failed for cookie 0x%" PRIx32 ": %s",
			  semid, cookie, strerror(errno));
		return 0;
	}

	log_debug("Udev cookie 0x%" PRIx32 " (semid %d) incremented",
		  cookie, semid);

	return 1;
}
/*
 * Subtract one from the cookie's notification semaphore counter without
 * blocking (IPC_NOWAIT).  Returns 1 on success, 0 on failure - in
 * particular when the counter is already zero (EAGAIN).
 */
static int _udev_notify_sem_dec(uint32_t cookie, int semid)
{
	struct sembuf op = {0, -1, IPC_NOWAIT};

	if (semop(semid, &op, 1) >= 0) {
		log_debug("Udev cookie 0x%" PRIx32 " (semid %d) decremented",
			  cookie, semid);
		return 1;
	}

	if (errno == EAGAIN)
		/* IPC_NOWAIT: counter was already at zero. */
		log_error("semid %d: semop failed for cookie "
			  "0x%" PRIx32 ": "
			  "incorrect semaphore state",
			  semid, cookie);
	else
		log_error("semid %d: semop failed for cookie "
			  "0x%" PRIx32 ": %s",
			  semid, cookie, strerror(errno));

	return 0;
}
/*
 * Remove the cookie's semaphore set from the system (IPC_RMID).
 * Returns 1 on success, 0 if semctl() fails.
 */
static int _udev_notify_sem_destroy(uint32_t cookie, int semid)
{
	if (semctl(semid, 0, IPC_RMID, 0) >= 0) {
		log_debug("Udev cookie 0x%" PRIx32 " (semid %d) destroyed", cookie,
			  semid);
		return 1;
	}

	log_error("Could not cleanup notification semaphore "
		  "identified by cookie value %" PRIu32 " (0x%x): %s",
		  cookie, cookie, strerror(errno));

	return 0;
}
/*
 * Create a fresh notification semaphore keyed by a random cookie.
 *
 * The cookie is DM_COOKIE_MAGIC in the top 16 bits plus a random
 * non-zero 16-bit value (from /dev/urandom) in the bottom bits.  The
 * semget() IPC_EXCL retry loop guarantees the key is not already in
 * use.  On success the semaphore counter is initialised to 1, *cookie
 * and *semid are filled in and 1 is returned.  On failure *cookie is
 * set to 0 and 0 is returned.
 */
static int _udev_notify_sem_create(uint32_t *cookie, int *semid)
{
	int fd;
	int gen_semid;
	uint16_t base_cookie;
	uint32_t gen_cookie;

	if ((fd = open("/dev/urandom", O_RDONLY)) < 0) {
		log_error("Failed to open /dev/urandom "
			  "to create random cookie value");
		*cookie = 0;
		return 0;
	}

	/* Generate random cookie value. Be sure it is unique and non-zero. */
	do {
		/* FIXME Handle non-error returns from read(). Move _io() into libdm? */
		if (read(fd, &base_cookie, sizeof(base_cookie)) != sizeof(base_cookie)) {
			log_error("Failed to initialize notification cookie");
			goto bad;
		}

		gen_cookie = DM_COOKIE_MAGIC << 16 | base_cookie;

		/* IPC_EXCL makes semget() fail with EEXIST if the key is taken. */
		if (base_cookie && (gen_semid = semget((key_t) gen_cookie,
			1, 0600 | IPC_CREAT | IPC_EXCL)) < 0) {
			switch (errno) {
				case EEXIST:
					/* if the semaphore key exists, we
					 * simply generate another random one */
					base_cookie = 0;
					break;
				case ENOMEM:
					log_error("Not enough memory to create "
						  "notification semaphore");
					goto bad;
				case ENOSPC:
					log_error("Limit for the maximum number "
						  "of semaphores reached. You can "
						  "check and set the limits in "
						  "/proc/sys/kernel/sem.");
					goto bad;
				default:
					log_error("Failed to create notification "
						  "semaphore: %s", strerror(errno));
					goto bad;
			}
		}
	} while (!base_cookie);

	log_debug("Udev cookie 0x%" PRIx32 " (semid %d) created",
		  gen_cookie, gen_semid);

	/* The initial value 1 represents this process's own reference. */
	if (semctl(gen_semid, 0, SETVAL, 1) < 0) {
		log_error("semid %d: semctl failed: %s", gen_semid, strerror(errno));
		/* We have to destroy just created semaphore
		 * so it won't stay in the system. */
		(void) _udev_notify_sem_destroy(gen_cookie, gen_semid);
		goto bad;
	}

	log_debug("Udev cookie 0x%" PRIx32 " (semid %d) incremented",
		  gen_cookie, gen_semid);

	if (close(fd))
		stack;

	*semid = gen_semid;
	*cookie = gen_cookie;

	return 1;

bad:
	/* Common failure path: close the urandom fd and report no cookie. */
	if (close(fd))
		stack;

	*cookie = 0;

	return 0;
}
/*
 * Associate a udev notification cookie with a task.
 *
 * The udev flags are stored in the high bits of dmt->event_nr and the
 * cookie's low bits are merged into the rest.  If *cookie is non-zero
 * the existing semaphore it names is reused; otherwise a new cookie
 * and semaphore are created.  The semaphore is incremented to account
 * for this task.
 *
 * When sync support is unavailable, *cookie is simply set to 0 and the
 * call succeeds.  Returns 1 on success, 0 on failure (dmt->event_nr is
 * reset to 0 in that case).
 */
int dm_task_set_cookie(struct dm_task *dmt, uint32_t *cookie, uint16_t flags)
{
	int semid;

	if (dm_cookie_supported())
		dmt->event_nr = flags << DM_UDEV_FLAGS_SHIFT;

	if (!dm_udev_get_sync_support()) {
		*cookie = 0;
		return 1;
	}

	if (*cookie) {
		/* Caller supplied an existing cookie - reuse its semaphore. */
		if (!_get_cookie_sem(*cookie, &semid))
			goto_bad;
	} else if (!_udev_notify_sem_create(cookie, &semid))
		goto_bad;

	if (!_udev_notify_sem_inc(*cookie, semid)) {
		log_error("Could not set notification semaphore "
			  "identified by cookie value %" PRIu32 " (0x%x)",
			  *cookie, *cookie);
		goto bad;
	}

	/* Cookie low bits share event_nr with the flag bits. */
	dmt->event_nr |= ~DM_UDEV_FLAGS_MASK & *cookie;
	dmt->cookie_set = 1;

	log_debug("Udev cookie 0x%" PRIx32 " (semid %d) assigned to dm_task "
		  "with flags 0x%" PRIx16, *cookie, semid, flags);

	return 1;

bad:
	dmt->event_nr = 0;
	return 0;
}
/*
 * Signal that this process has finished its part of the transaction
 * by decrementing the cookie's notification semaphore.
 * Returns 1 on success or when there is nothing to do.
 */
int dm_udev_complete(uint32_t cookie)
{
	int semid;

	if (!cookie || !dm_udev_get_sync_support())
		return 1;

	if (_get_cookie_sem(cookie, &semid)) {
		if (_udev_notify_sem_dec(cookie, semid))
			return 1;

		log_error("Could not signal waiting process using notification "
			  "semaphore identified by cookie value %" PRIu32 " (0x%x)",
			  cookie, cookie);
		return 0;
	}

	return_0;
}
/*
 * Block until every other holder of the cookie has called
 * dm_udev_complete().
 *
 * Drops this process's own reference first, then waits for the
 * semaphore counter to reach zero, and finally destroys the semaphore.
 * Returns 1 on success (including when sync is disabled or the
 * semaphore was removed by someone else), 0 on failure.
 */
int dm_udev_wait(uint32_t cookie)
{
	int semid;
	struct sembuf sb = {0, 0, 0};

	if (!cookie || !dm_udev_get_sync_support())
		return 1;

	if (!_get_cookie_sem(cookie, &semid))
		return_0;

	if (!_udev_notify_sem_dec(cookie, semid)) {
		log_error("Failed to set a proper state for notification "
			  "semaphore identified by cookie value %" PRIu32 " (0x%x) "
			  "to initialize waiting for incoming notifications.",
			  cookie, cookie);
		(void) _udev_notify_sem_destroy(cookie, semid);
		return 0;
	}

	log_debug("Udev cookie 0x%" PRIx32 " (semid %d): Waiting for zero",
		  cookie, semid);

repeat_wait:
	/* sem_op == 0: semop() blocks until the counter becomes zero. */
	if (semop(semid, &sb, 1) < 0) {
		if (errno == EINTR)
			goto repeat_wait;
		else if (errno == EIDRM)
			/* Semaphore already removed - nothing left to wait for. */
			return 1;

		log_error("Could not set wait state for notification semaphore "
			  "identified by cookie value %" PRIu32 " (0x%x): %s",
			  cookie, cookie, strerror(errno));
		(void) _udev_notify_sem_destroy(cookie, semid);
		return 0;
	}

	return _udev_notify_sem_destroy(cookie, semid);
}
#endif /* UDEV_SYNC_SUPPORT */

View File

@ -1,4 +1,4 @@
/* $NetBSD: libdm-deptree.c,v 1.3 2009/02/18 12:16:13 haad Exp $ */
/* $NetBSD: libdm-deptree.c,v 1.4 2009/12/02 00:58:03 haad Exp $ */
/*
* Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
@ -22,6 +22,7 @@
#include <stdarg.h>
#include <sys/param.h>
#include <sys/utsname.h>
#define MAX_TARGET_PARAMSIZE 500000
@ -30,7 +31,8 @@
/* Supported segment types */
enum {
SEG_ERROR,
SEG_CRYPT,
SEG_ERROR,
SEG_LINEAR,
SEG_MIRRORED,
SEG_SNAPSHOT,
@ -45,6 +47,7 @@ struct {
unsigned type;
const char *target;
} dm_segtypes[] = {
{ SEG_CRYPT, "crypt" },
{ SEG_ERROR, "error" },
{ SEG_LINEAR, "linear" },
{ SEG_MIRRORED, "mirror" },
@ -71,8 +74,8 @@ struct load_segment {
uint64_t size;
unsigned area_count; /* Linear + Striped + Mirrored */
struct dm_list areas; /* Linear + Striped + Mirrored */
unsigned area_count; /* Linear + Striped + Mirrored + Crypt */
struct dm_list areas; /* Linear + Striped + Mirrored + Crypt */
uint32_t stripe_size; /* Striped */
@ -87,6 +90,12 @@ struct load_segment {
unsigned mirror_area_count; /* Mirror */
uint32_t flags; /* Mirror log */
char *uuid; /* Clustered mirror log */
const char *cipher; /* Crypt */
const char *chainmode; /* Crypt */
const char *iv; /* Crypt */
uint64_t iv_offset; /* Crypt */
const char *key; /* Crypt */
};
/* Per-device properties */
@ -123,6 +132,8 @@ struct dm_tree_node {
int activation_priority; /* 0 gets activated first */
uint16_t udev_flags; /* Udev control flags */
void *context; /* External supplied context */
struct load_properties props; /* For creation/table (re)load */
@ -135,6 +146,7 @@ struct dm_tree {
struct dm_tree_node root;
int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
int no_flush; /* 1 sets noflush (mirrors/multipath) */
uint32_t cookie;
};
struct dm_tree *dm_tree_create(void)
@ -293,7 +305,8 @@ static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
const char *name,
const char *uuid,
struct dm_info *info,
void *context)
void *context,
uint16_t udev_flags)
{
struct dm_tree_node *node;
uint64_t dev;
@ -309,6 +322,7 @@ static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
node->uuid = uuid;
node->info = *info;
node->context = context;
node->udev_flags = udev_flags;
node->activation_priority = 0;
dm_list_init(&node->uses);
@ -458,8 +472,8 @@ static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
return_NULL;
if (!(node = _create_dm_tree_node(dtree, name, uuid,
&info, NULL)))
if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
NULL, 0)))
goto_out;
new = 1;
}
@ -577,8 +591,8 @@ struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
info.inactive_table = 0;
info.read_only = 0;
if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2,
&info, context)))
if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
context, 0)))
return_NULL;
/* Attach to root node until a table is supplied */
@ -605,10 +619,31 @@ struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
return_NULL;
dnode->context = context;
dnode->udev_flags = 0;
return dnode;
}
/*
 * Same as dm_tree_add_new_dev() but additionally records the given
 * udev control flags on the created node.  Returns the node, or NULL
 * if dm_tree_add_new_dev() fails.
 */
struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
							 const char *name,
							 const char *uuid,
							 uint32_t major,
							 uint32_t minor,
							 int read_only,
							 int clear_inactive,
							 void *context,
							 uint16_t udev_flags)
{
	struct dm_tree_node *created;

	created = dm_tree_add_new_dev(dtree, name, uuid, major, minor,
				      read_only, clear_inactive, context);
	if (created)
		created->udev_flags = udev_flags;

	return created;
}
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
uint32_t read_ahead,
uint32_t read_ahead_flags)
@ -642,6 +677,11 @@ void *dm_tree_node_get_context(struct dm_tree_node *node)
return node->context;
}
/* Report whether the node's loaded table changed the device size. */
int dm_tree_node_size_changed(struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}
int dm_tree_node_num_children(struct dm_tree_node *node, uint32_t inverted)
{
if (inverted) {
@ -808,10 +848,11 @@ static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
return r;
}
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor)
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
uint32_t *cookie, uint16_t udev_flags)
{
struct dm_task *dmt;
int r;
int r = 0;
log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
@ -822,26 +863,30 @@ static int _deactivate_node(const char *name, uint32_t major, uint32_t minor)
if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
log_error("Failed to set device number for %s deactivation", name);
dm_task_destroy(dmt);
return 0;
goto out;
}
if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");
if (!dm_task_set_cookie(dmt, cookie, udev_flags))
goto out;
r = dm_task_run(dmt);
/* FIXME Until kernel returns actual name so dm-ioctl.c can handle it */
rm_dev_node(name);
rm_dev_node(name, dmt->cookie_set);
/* FIXME Remove node from tree or mark invalid? */
out:
dm_task_destroy(dmt);
return r;
}
static int _rename_node(const char *old_name, const char *new_name, uint32_t major, uint32_t minor)
static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
struct dm_task *dmt;
int r = 0;
@ -864,6 +909,9 @@ static int _rename_node(const char *old_name, const char *new_name, uint32_t maj
if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");
if (!dm_task_set_cookie(dmt, cookie, udev_flags))
goto out;
r = dm_task_run(dmt);
out:
@ -875,10 +923,11 @@ out:
/* FIXME Merge with _suspend_node? */
static int _resume_node(const char *name, uint32_t major, uint32_t minor,
uint32_t read_ahead, uint32_t read_ahead_flags,
struct dm_info *newinfo)
struct dm_info *newinfo, uint32_t *cookie,
uint16_t udev_flags)
{
struct dm_task *dmt;
int r;
int r = 0;
log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
@ -890,14 +939,12 @@ static int _resume_node(const char *name, uint32_t major, uint32_t minor,
/* FIXME Kernel should fill in name on return instead */
if (!dm_task_set_name(dmt, name)) {
log_error("Failed to set readahead device name for %s", name);
dm_task_destroy(dmt);
return 0;
goto out;
}
if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
log_error("Failed to set device number for %s resumption.", name);
dm_task_destroy(dmt);
return 0;
goto out;
}
if (!dm_task_no_open_count(dmt))
@ -906,9 +953,13 @@ static int _resume_node(const char *name, uint32_t major, uint32_t minor,
if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
log_error("Failed to set read ahead");
if (!dm_task_set_cookie(dmt, cookie, udev_flags))
goto out;
if ((r = dm_task_run(dmt)))
r = dm_task_get_info(dmt, newinfo);
out:
dm_task_destroy(dmt);
return r;
@ -989,7 +1040,8 @@ int dm_tree_deactivate_children(struct dm_tree_node *dnode,
!info.exists || info.open_count)
continue;
if (!_deactivate_node(name, info.major, info.minor)) {
if (!_deactivate_node(name, info.major, info.minor,
&child->dtree->cookie, child->udev_flags)) {
log_error("Unable to deactivate %s (%" PRIu32
":%" PRIu32 ")", name, info.major,
info.minor);
@ -1133,7 +1185,9 @@ int dm_tree_activate_children(struct dm_tree_node *dnode,
/* Rename? */
if (child->props.new_name) {
if (!_rename_node(name, child->props.new_name, child->info.major, child->info.minor)) {
if (!_rename_node(name, child->props.new_name, child->info.major,
child->info.minor, &child->dtree->cookie,
child->udev_flags)) {
log_error("Failed to rename %s (%" PRIu32
":%" PRIu32 ") to %s", name, child->info.major,
child->info.minor, child->props.new_name);
@ -1147,8 +1201,8 @@ int dm_tree_activate_children(struct dm_tree_node *dnode,
continue;
if (!_resume_node(child->name, child->info.major, child->info.minor,
child->props.read_ahead,
child->props.read_ahead_flags, &newinfo)) {
child->props.read_ahead, child->props.read_ahead_flags,
&newinfo, &child->dtree->cookie, child->udev_flags)) {
log_error("Unable to resume %s (%" PRIu32
":%" PRIu32 ")", child->name, child->info.major,
child->info.minor);
@ -1235,31 +1289,159 @@ do {\
p += w;\
} while (0)
/*
* _emit_areas_line
*
* Returns: 1 on success, 0 on failure
*/
/*
 * _emit_areas_line
 *
 * Append "<dev> <offset>" pairs for every area of the segment to the
 * params buffer, separated by single spaces.
 *
 * Merge-conflict resolution: the import left both the old EMIT_PARAMS
 * call (leading space) and the new first_time-aware call in the loop;
 * keep only the newer variant.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _emit_areas_line(struct dm_task *dmt __attribute((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;

	dm_list_iterate_items(area, &seg->areas) {
		if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
			return_0;

		/* No separator before the first area. */
		EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
			    devbuf, area->offset);
		first_time = 0;
	}

	return 1;
}
static int _emit_segment_line(struct dm_task *dmt, struct load_segment *seg, uint64_t *seg_start, char *params, size_t paramsize)
/*
* Returns: 1 on success, 0 on failure
*/
/*
 * Build and emit the table parameter line for a mirror segment.
 *
 * Chooses between the historical "block_on_error" log argument and the
 * newer "handle_errors" feature argument, and between native
 * "clustered-" log types and the userspace log module, based on the
 * running kernel version.
 *
 * NOTE(review): major/minor are currently unused in this body - they
 * appear to exist for logging symmetry with the caller; confirm.
 *
 * Returns: 1 on success, 0 on failure.
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, uint32_t major,
				     uint32_t minor, struct load_segment *seg,
				     uint64_t *seg_start, char *params,
				     size_t paramsize)
{
	int r;
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;

	/* Kernel release string drives the feature decisions below. */
	r = uname(&uts);
	if (r)
		return_0;

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table. It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (strncmp(uts.release, "2.6.22", 6) >= 0)
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types.  Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (strncmp(uts.release, "2.6.31", 6) >= 0)
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		/* Old-style clustered logs use a "clustered-" prefix. */
		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0)
		return_0;

	/* Feature args go at the very end of the table line. */
	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
uint32_t minor, struct load_segment *seg,
uint64_t *seg_start, char *params,
size_t paramsize)
{
int pos = 0;
int r;
char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
char logbuf[DM_FORMAT_DEV_BUFSIZE];
const char *logtype;
switch(seg->type) {
case SEG_ERROR:
@ -1267,47 +1449,11 @@ static int _emit_segment_line(struct dm_task *dmt, struct load_segment *seg, uin
case SEG_LINEAR:
break;
case SEG_MIRRORED:
log_parm_count = 1; /* Region size */
log_parm_count += hweight32(seg->flags); /* [no]sync, block_on_error etc. */
if (seg->flags & DM_CORELOG)
log_parm_count--; /* DM_CORELOG does not count in the param list */
if (seg->clustered) {
if (seg->uuid)
log_parm_count++;
EMIT_PARAMS(pos, "clustered-");
}
if (!seg->log)
logtype = "core";
else {
logtype = "disk";
log_parm_count++;
if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
return_0;
}
EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
if (seg->log)
EMIT_PARAMS(pos, " %s", logbuf);
EMIT_PARAMS(pos, " %u", seg->region_size);
if (seg->clustered && seg->uuid)
EMIT_PARAMS(pos, " %s", seg->uuid);
if ((seg->flags & DM_NOSYNC))
EMIT_PARAMS(pos, " nosync");
else if ((seg->flags & DM_FORCESYNC))
EMIT_PARAMS(pos, " sync");
if ((seg->flags & DM_BLOCK_ON_ERROR))
EMIT_PARAMS(pos, " block_on_error");
EMIT_PARAMS(pos, " %u", seg->mirror_area_count);
/* Mirrors are pretty complicated - now in separate function */
r = _mirror_emit_segment_line(dmt, major, minor, seg, seg_start,
params, paramsize);
if (!r)
return_0;
break;
case SEG_SNAPSHOT:
if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
@ -1323,7 +1469,14 @@ static int _emit_segment_line(struct dm_task *dmt, struct load_segment *seg, uin
EMIT_PARAMS(pos, "%s", originbuf);
break;
case SEG_STRIPED:
EMIT_PARAMS(pos, "%u %u", seg->area_count, seg->stripe_size);
EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
break;
case SEG_CRYPT:
EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
seg->chainmode ? "-" : "", seg->chainmode ?: "",
seg->iv ? "-" : "", seg->iv ?: "", seg->key,
seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
seg->iv_offset : *seg_start);
break;
}
@ -1333,8 +1486,8 @@ static int _emit_segment_line(struct dm_task *dmt, struct load_segment *seg, uin
case SEG_SNAPSHOT_ORIGIN:
case SEG_ZERO:
break;
case SEG_CRYPT:
case SEG_LINEAR:
case SEG_MIRRORED:
case SEG_STRIPED:
if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
stack;
@ -1343,7 +1496,8 @@ static int _emit_segment_line(struct dm_task *dmt, struct load_segment *seg, uin
break;
}
log_debug("Adding target: %" PRIu64 " %" PRIu64 " %s %s",
log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
" %" PRIu64 " %s %s", major, minor,
*seg_start, seg->size, dm_segtypes[seg->type].target, params);
if (!dm_task_add_target(dmt, *seg_start, seg->size, dm_segtypes[seg->type].target, params))
@ -1356,8 +1510,8 @@ static int _emit_segment_line(struct dm_task *dmt, struct load_segment *seg, uin
#undef EMIT_PARAMS
static int _emit_segment(struct dm_task *dmt, struct load_segment *seg,
uint64_t *seg_start)
static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
struct load_segment *seg, uint64_t *seg_start)
{
char *params;
size_t paramsize = 4096;
@ -1370,7 +1524,8 @@ static int _emit_segment(struct dm_task *dmt, struct load_segment *seg,
}
params[0] = '\0';
ret = _emit_segment_line(dmt, seg, seg_start, params, paramsize);
ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
params, paramsize);
dm_free(params);
if (!ret)
@ -1396,7 +1551,8 @@ static int _load_node(struct dm_tree_node *dnode)
struct load_segment *seg;
uint64_t seg_start = 0;
log_verbose("Loading %s table", dnode->name);
log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
dnode->info.major, dnode->info.minor);
if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
log_error("Reload dm_task creation failed for %s", dnode->name);
@ -1418,7 +1574,8 @@ static int _load_node(struct dm_tree_node *dnode)
log_error("Failed to disable open_count");
dm_list_iterate_items(seg, &dnode->props.segs)
if (!_emit_segment(dmt, seg, &seg_start))
if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
seg, &seg_start))
goto_out;
if (!dm_task_suppress_identical_reload(dmt))
@ -1483,6 +1640,10 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
}
}
/* Propagate device size change change */
if (child->props.size_changed)
dnode->props.size_changed = 1;
/* Resume device immediately if it has parents and its size changed */
if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
continue;
@ -1491,8 +1652,8 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
continue;
if (!_resume_node(child->name, child->info.major, child->info.minor,
child->props.read_ahead,
child->props.read_ahead_flags, &newinfo)) {
child->props.read_ahead, child->props.read_ahead_flags,
&newinfo, &child->dtree->cookie, child->udev_flags)) {
log_error("Unable to resume %s (%" PRIu32
":%" PRIu32 ")", child->name, child->info.major,
child->info.minor);
@ -1666,6 +1827,28 @@ int dm_tree_node_add_striped_target(struct dm_tree_node *node,
return 1;
}
/*
 * Append a crypt segment of the given size to the node's table.
 * The cipher/chainmode/iv/key strings are stored by reference (no
 * copies are made).  Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
				  uint64_t size,
				  const char *cipher,
				  const char *chainmode,
				  const char *iv,
				  uint64_t iv_offset,
				  const char *key)
{
	struct load_segment *seg = _add_segment(node, SEG_CRYPT, size);

	if (!seg)
		return_0;

	seg->cipher = cipher;
	seg->chainmode = chainmode;
	seg->iv = iv;
	seg->iv_offset = iv_offset;
	seg->key = key;

	return 1;
}
int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
uint32_t region_size,
unsigned clustered,
@ -1791,3 +1974,13 @@ int dm_tree_node_add_target_area(struct dm_tree_node *node,
return 1;
}
/* Store the udev cookie on the tree this node belongs to. */
void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
{
	node->dtree->cookie = cookie;
}
/* Retrieve the udev cookie of the tree this node belongs to. */
uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
	return node->dtree->cookie;
}

View File

@ -1,6 +1,6 @@
#
# Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
# Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
#
# This file is part of LVM2.
#
@ -14,97 +14,79 @@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
top_builddir = @top_builddir@
VPATH = @srcdir@
SUBDIRS = doc include man scripts
ifeq ("@INTL@", "yes")
SUBDIRS += po
endif
SUBDIRS += lib tools daemons libdm
ifeq ($(MAKECMDGOALS),distclean)
SUBDIRS += daemons/clvmd \
daemons/dmeventd/plugins \
lib/format1 \
lib/format_pool \
lib/locking \
lib/mirror \
lib/snapshot \
test \
po
DISTCLEAN_TARGETS += lib/misc/configure.h
DISTCLEAN_DIRS += lcov_reports*
endif
include make.tmpl
libdm: include
lib: libdm
daemons: lib tools
tools: lib device-mapper
po: tools daemons
libdm.device-mapper: include.device-mapper
daemons.device-mapper: libdm.device-mapper
tools.device-mapper: libdm.device-mapper
device-mapper: tools.device-mapper daemons.device-mapper man.device-mapper
ifeq ("@INTL@", "yes")
lib.pofile: include.pofile
tools.pofile: lib.pofile
daemons.pofile: lib.pofile
po.pofile: tools.pofile daemons.pofile
pofile: po.pofile
endif
ifneq ("@CFLOW_CMD@", "")
tools.cflow: lib.cflow
cflow: tools.cflow
endif
ifneq ("@CSCOPE_CMD@", "")
cscope.out: tools
@CSCOPE_CMD@ -b -R
all: cscope.out
endif
check: all
$(MAKE) -C test all
ifneq ("@LCOV@", "")
.PHONY: lcov-reset lcov lcov-dated
ifeq ($(MAKECMDGOALS),lcov-dated)
LCOV_REPORTS_DIR=$(top_srcdir)/lcov_reports-$(shell date +%Y%m%d%k%M%S)
ifeq ("@FSADM@", "yes")
FSADMMAN = fsadm.8
else
LCOV_REPORTS_DIR=$(top_srcdir)/lcov_reports
FSADMMAN =
endif
lcov-reset:
$(LCOV) -d $(top_srcdir)/dmeventd --zerocounters
$(LCOV) -d $(top_srcdir)/libdm --zerocounters
$(LCOV) -d $(top_srcdir)/lib --zerocounters
$(LCOV) -d $(top_srcdir)/tools --zerocounters
MAN5=lvm.conf.5
MAN8=lvchange.8 lvconvert.8 lvcreate.8 lvdisplay.8 lvextend.8 lvm.8 \
lvmchange.8 lvmdiskscan.8 lvmdump.8 \
lvreduce.8 lvremove.8 lvrename.8 lvresize.8 lvs.8 \
lvscan.8 pvchange.8 pvck.8 pvcreate.8 pvdisplay.8 pvmove.8 pvremove.8 \
pvresize.8 pvs.8 pvscan.8 vgcfgbackup.8 vgcfgrestore.8 vgchange.8 \
vgck.8 vgcreate.8 vgconvert.8 vgdisplay.8 vgexport.8 vgextend.8 \
vgimport.8 vgimportclone.8 vgmerge.8 vgmknodes.8 vgreduce.8 vgremove.8 \
vgrename.8 vgs.8 vgscan.8 vgsplit.8 $(FSADMMAN)
MAN8CLUSTER=clvmd.8
MAN8DM=dmsetup.8
MAN5DIR=${mandir}/man5
MAN8DIR=${mandir}/man8
lcov: all
$(RM) -rf $(LCOV_REPORTS_DIR)
$(MKDIR_P) $(LCOV_REPORTS_DIR)
$(LCOV) -b ${top_srcdir}/libdm -d $(top_srcdir)/libdm -c -o $(LCOV_REPORTS_DIR)/libdm.info
$(LCOV) -b $(top_srcdir)/lib -d $(top_srcdir)/lib -c -o $(LCOV_REPORTS_DIR)/lib.info
$(LCOV) -b $(top_srcdir)/tools -d $(top_srcdir)/tools -c -o $(LCOV_REPORTS_DIR)/tools.info
DMEVENTD_INFO="$(LCOV_REPORTS_DIR)/dmeventd.info" ;\
DMEVENTD_INFO_A="-a $$DMEVENTDINFO" ;\
$(LCOV) -b $(top_srcdir)/dmeventd -d $(top_srcdir)/dmeventd -c -o $$DMEVENTD_INFO || DMEVENTD_INFO_A="" ;\
$(LCOV) $$DMEVENTD_INFO_A -a $(LCOV_REPORTS_DIR)/lib.info \
-a $(LCOV_REPORTS_DIR)/libdm.info \
-a $(LCOV_REPORTS_DIR)/tools.info \
-o $(LCOV_REPORTS_DIR)/lvm.info
ifneq ("@GENHTML@", "")
$(GENHTML) -o $(LCOV_REPORTS_DIR) -p $(top_srcdir) $(LCOV_REPORTS_DIR)/lvm.info
CLEAN_TARGETS=$(MAN5) $(MAN8) $(MAN8CLUSTER) $(FSADMMAN) $(MAN8DM)
include ../make.tmpl
ifneq ("@CLVMD@", "none")
install: install_cluster
endif
lcov-dated: lcov
all: man
endif
.PHONY: man
device-mapper: $(MAN8DM)
man: $(MAN5) $(MAN8) $(MAN8CLUSTER)
$(MAN5) $(MAN8) $(MAN8CLUSTER): Makefile
%: %.in
$(SED) -e "s/#VERSION#/$(LVM_VERSION)/" $< > $@
install_lvm2:
@echo "Installing $(MAN8) in $(MAN8DIR)"
@for f in $(MAN8); \
do \
$(RM) $(MAN8DIR)/$$f; \
@INSTALL@ -D $(OWNER) $(GROUP) -m 444 $$f $(MAN8DIR)/$$f; \
done
@echo "Installing $(MAN5) in $(MAN5DIR)"
@for f in $(MAN5); \
do \
$(RM) $(MAN5DIR)/$$f; \
@INSTALL@ -D $(OWNER) $(GROUP) -m 444 $$f $(MAN5DIR)/$$f; \
done
install_cluster:
@echo "Installing $(MAN8CLUSTER) in $(MAN8DIR)"
@for f in $(MAN8CLUSTER); \
do \
$(RM) $(MAN8DIR)/$$f; \
@INSTALL@ -D $(OWNER) $(GROUP) -m 444 $$f $(MAN8DIR)/$$f; \
done
install_device-mapper:
@echo "Installing $(MAN8DM) in $(MAN8DIR)"
@for f in $(MAN8DM); \
do \
$(RM) $(MAN8DIR)/$$f; \
@INSTALL@ -D $(OWNER) $(GROUP) -m 444 $$f $(MAN8DIR)/$$f; \
done
install: install_lvm2 install_device-mapper

View File

@ -1,6 +1,6 @@
.\" $NetBSD: fsadm.8,v 1.2 2009/02/18 12:16:13 haad Exp $
.\" $NetBSD: fsadm.8,v 1.3 2009/12/02 00:58:03 haad Exp $
.\"
.TH "FSADM" "8" "LVM TOOLS 2.02.44-cvs (02-17-09)" "Red Hat, Inc" "\""
.TH "FSADM" "8" "LVM TOOLS 2.02.56(1)-cvs (12-01-09)" "Red Hat, Inc" "\""
.SH "NAME"
fsadm \- utility to resize or check filesystem on a device
.SH "SYNOPSIS"
@ -11,42 +11,48 @@ fsadm \- utility to resize or check filesystem on a device
.RI [options]\ resize\ device\ [new_size[BKMGTEP]]
.SH "DESCRIPTION"
\fBfsadm\fR utility resizes or checks the filesystem on a device. It tries to use the same API for \fBExt2/3\fR, \fBReiserFS\fR and \fBXFS\fR filesystem and simply resize and filesystem check operation.
\fBfsadm\fR utility resizes or checks the filesystem on a device.
It tries to use the same API for \fBext2/ext3/ext4\fR,
\fBReiserFS\fR and \fBXFS\fR filesystem.
.SH "OPTIONS"
.TP
.TP
\fB\-h \-\-help\fR
\(em print help message
.TP
.TP
\fB\-v \-\-verbose\fR
\(em be more verbose
.TP
.TP
\fB\-e \-\-ext\-offline\fR
\(em unmount Ext2/3 filesystem before doing resize
.TP
\(em unmount ext2/ext3/ext4 filesystem before doing resize
.TP
\fB\-f \-\-force\fR
\(em bypass some sanity checks
.TP
.TP
\fB\-n \-\-dry\-run\fR
\(em print commands without running them
.TP
.TP
\fB\-y \-\-yes\fR
\(em answer "yes" at any prompts
.TP
.TP
\fBnew_size\fR
\(em Absolute number of filesystem blocks to be in the filesystem, or an absolute size using a suffix (in powers of 1024). If new_size is not supplied, the whole device is used.
\(em Absolute number of filesystem blocks to be in the filesystem,
or an absolute size using a suffix (in powers of 1024).
If new_size is not supplied, the whole device is used.
.SH "EXAMPLES"
"fsadm \-e \-y resize /dev/vg/test 1000M" tries to resize the size of the filesystem on logical volume /dev/vg/test. If /dev/vg/test contains Ext2/3 filesystem it will be unmounted prior the resize. All [y|n] questions will be answered 'y'.
"fsadm \-e \-y resize /dev/vg/test 1000M" tries to resize the filesystem
on logical volume /dev/vg/test. If /dev/vg/test contains ext2/ext3/ext4
filesystem it will be unmounted prior to the resize.
All [y|n] questions will be answered 'y'.
.SH "ENVIRONMENT VARIABLES"
.TP
\fBTMPDIR\fP
.TP
\fBTMPDIR\fP
Where the temporary directory should be created.
.TP
.BR
.TP
.BR
.SH "SEE ALSO"
.BR lvm (8),
.BR lvresize (8),
.BR lvm (8),
.BR lvresize (8),
.BR lvm.conf (5),
.BR tune2fs (8),
.BR resize2fs (8),
@ -55,4 +61,3 @@ Where the temporary directory should be created.
.BR xfs_info (8),
.BR xfs_growfs (8),
.BR xfs_check (8)

7630
external/gpl2/lvm2/dist/po/lvm2.po vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,3 +0,0 @@
#ifndef _LVM_VERSION_H
#define LVM_VERSION @LVM_VERSION@
#endif