From 444feb840cfe5a24682398c1568d2c4a937f16d2 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Fri, 29 Mar 2019 14:52:25 -0400 Subject: [PATCH] Remove intermediate files from repo Signed-off-by: Adam Rutkowski --- .../cas_cache/layer_cache_management.c.orig | 1615 ----------------- .../volume/vol_atomic_dev_bottom.c.orig | 1217 ------------- .../volume/vol_block_dev_top.o.ur-safe | 2 - modules/cas_disk/exp_obj.o.ur-safe | 2 - 4 files changed, 2836 deletions(-) delete mode 100644 modules/cas_cache/layer_cache_management.c.orig delete mode 100644 modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig delete mode 100644 modules/cas_cache/volume/vol_block_dev_top.o.ur-safe delete mode 100644 modules/cas_disk/exp_obj.o.ur-safe diff --git a/modules/cas_cache/layer_cache_management.c.orig b/modules/cas_cache/layer_cache_management.c.orig deleted file mode 100644 index 08a26a42b..000000000 --- a/modules/cas_cache/layer_cache_management.c.orig +++ /dev/null @@ -1,1615 +0,0 @@ -/* -* Copyright(c) 2012-2019 Intel Corporation -* SPDX-License-Identifier: BSD-3-Clause-Clear -*/ - -#include "cas_cache.h" -#include "utils/utils_blk.h" -#include "threads.h" - -extern u32 max_writeback_queue_size; -extern u32 writeback_queue_unblock_size; -extern u32 metadata_layout; -extern u32 unaligned_io; -extern u32 seq_cut_off_mb; -extern u32 use_io_scheduler; - -int cache_mng_flush_object(ocf_cache_id_t cache_id, ocf_core_id_t core_id, - bool interruption) -{ - ocf_cache_t cache; - ocf_core_t core; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_core_get(cache, core_id, &core); - if (result) - goto out; - - result = ocf_mngt_core_flush(core, interruption); - -out: - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_flush_device(ocf_cache_id_t id) -{ - int result; - ocf_cache_t cache; - - result = ocf_mngt_cache_get(cas_ctx, id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_mngt_cache_flush(cache, true); - - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_set_cleaning_policy(ocf_cache_id_t cache_id, uint32_t type) -{ - ocf_cache_t cache; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_mngt_cache_cleaning_set_policy(cache, type); - - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_get_cleaning_policy(ocf_cache_id_t cache_id, uint32_t *type) -{ - ocf_cleaning_t tmp_type; - ocf_cache_t cache; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_mngt_cache_cleaning_get_policy(cache, &tmp_type); - - if (result == 0) - *type = tmp_type; - - ocf_mngt_cache_read_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_set_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, - uint32_t param_id, uint32_t param_value) -{ - ocf_cache_t cache; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, 
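/* A minimal sketch of the get -> lock -> operate -> unlock -> put idiom that
 * nearly every management entry point in this file repeats (the getters use
 * ocf_mngt_cache_read_lock()/read_unlock() instead). The helper name and the
 * op() callback are hypothetical, for illustration only:
 *
 *   static int with_cache_locked(ocf_cache_id_t id, int (*op)(ocf_cache_t))
 *   {
 *       ocf_cache_t cache;
 *       int result = ocf_mngt_cache_get(cas_ctx, id, &cache);
 *
 *       if (result)
 *           return result;
 *       result = ocf_mngt_cache_lock(cache);
 *       if (result) {
 *           ocf_mngt_cache_put(cache);   // drop the reference on lock failure
 *           return result;
 *       }
 *       result = op(cache);              // e.g. ocf_mngt_cache_flush(cache, true)
 *       ocf_mngt_cache_unlock(cache);
 *       ocf_mngt_cache_put(cache);
 *       return result;
 *   }
 */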
&cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_mngt_cache_cleaning_set_param(cache, type, - param_id, param_value); - - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_get_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type, - uint32_t param_id, uint32_t *param_value) -{ - ocf_cache_t cache; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_mngt_cache_cleaning_get_param(cache, type, - param_id, param_value); - - ocf_mngt_cache_read_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -struct get_paths_ctx { - char *core_path_name_tab; - int max_count; - int position; -}; - -int _cache_mng_core_pool_get_paths_visitor(ocf_uuid_t uuid, void *ctx) -{ - struct get_paths_ctx *visitor_ctx = ctx; - - if (visitor_ctx->position >= visitor_ctx->max_count) - return 0; - - if (copy_to_user((void __user *)visitor_ctx->core_path_name_tab + - (visitor_ctx->position * MAX_STR_LEN), - uuid->data, uuid->size)) { - return -ENODATA; - } - - visitor_ctx->position++; - - return 0; -} - -int cache_mng_core_pool_get_paths(struct kcas_core_pool_path *cmd_info) -{ - struct get_paths_ctx visitor_ctx = {0}; - int result; - - visitor_ctx.core_path_name_tab = cmd_info->core_path_tab; - visitor_ctx.max_count = cmd_info->core_pool_count; - - result = ocf_mngt_core_pool_visit(cas_ctx, - _cache_mng_core_pool_get_paths_visitor, - &visitor_ctx); - - cmd_info->core_pool_count = visitor_ctx.position; - return result; -} - -int cache_mng_core_pool_remove(struct kcas_core_pool_remove *cmd_info) -{ - struct ocf_volume_uuid uuid; - ocf_volume_t vol; - - uuid.data = cmd_info->core_path_name; - uuid.size = strnlen(cmd_info->core_path_name, MAX_STR_LEN); - - vol = ocf_mngt_core_pool_lookup(cas_ctx, &uuid, - ocf_ctx_get_volume_type(cas_ctx, - BLOCK_DEVICE_OBJECT)); - if (!vol) - return -OCF_ERR_CORE_NOT_AVAIL; - - ocf_volume_close(vol); - ocf_mngt_core_pool_remove(cas_ctx, vol); - - return 0; -} - -struct cache_mng_metadata_probe_context { - struct completion compl; - struct kcas_cache_check_device *cmd_info; - int *result; -}; - -static void cache_mng_metadata_probe_end(void *priv, int error, - struct ocf_metadata_probe_status *status) -{ - struct cache_mng_metadata_probe_context *context = priv; - struct kcas_cache_check_device *cmd_info = context->cmd_info; - - *context->result = error; - - if (error == -ENODATA || error == -EBADF) { - cmd_info->is_cache_device = false; - context->result = 0; - } else if (error == 0) { - cmd_info->is_cache_device = true; - cmd_info->clean_shutdown = status->clean_shutdown; - cmd_info->cache_dirty = status->cache_dirty; - } - - complete(&context->compl); -} - -int cache_mng_cache_check_device(struct kcas_cache_check_device *cmd_info) -{ - struct cache_mng_metadata_probe_context context; - struct block_device *bdev; - ocf_volume_t volume; - char holder[] = "CAS CHECK CACHE DEVICE\n"; - int result; - - bdev = OPEN_BDEV_EXCLUSIVE(cmd_info->path_name, FMODE_READ, holder); - if (IS_ERR(bdev)) { - return (PTR_ERR(bdev) == -EBUSY) ? 
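/* Note on _cache_mng_core_pool_get_paths_visitor() above: paths are copied to
 * the user buffer at fixed MAX_STR_LEN strides, so entry i lands at
 * core_path_name_tab + i * MAX_STR_LEN, and the caller reads back
 * cmd_info->core_pool_count to learn how many slots were actually filled. */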
- -OCF_ERR_NOT_OPEN_EXC : - -OCF_ERR_INVAL_VOLUME_TYPE; - } - - result = cas_blk_open_volume_by_bdev(&volume, bdev); - if (result) - goto out_bdev; - - cmd_info->format_atomic = (ocf_ctx_get_volume_type_id(cas_ctx, - ocf_volume_get_type(volume)) == ATOMIC_DEVICE_OBJECT); - - init_completion(&context.compl); - context.cmd_info = cmd_info; - context.result = &result; - - ocf_metadata_probe(cas_ctx, volume, cache_mng_metadata_probe_end, - &context); -<<<<<<< HEAD -======= - ->>>>>>> 05169c4c2... ASYNC LOAD/ATTACH - wait_for_completion(&context.compl); - - cas_blk_close_volume(volume); -out_bdev: - CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ); - return result; -} - -int cache_mng_prepare_core_cfg(struct ocf_mngt_core_config *cfg, - struct kcas_insert_core *cmd_info) -{ - int result; - - if (strnlen(cmd_info->core_path_name, MAX_STR_LEN) >= MAX_STR_LEN) - return -OCF_ERR_INVAL; - - memset(cfg, 0, sizeof(*cfg)); - cfg->uuid.data = cmd_info->core_path_name; - cfg->uuid.size = strnlen(cmd_info->core_path_name, MAX_STR_LEN) + 1; - cfg->core_id = cmd_info->core_id; - cfg->cache_id = cmd_info->cache_id; - cfg->try_add = cmd_info->try_add; - - if (cas_upgrade_is_in_upgrade()) { - cfg->volume_type = BLOCK_DEVICE_OBJECT; - return 0; - } - - if (cmd_info->update_path) - return 0; - - result = cas_blk_identify_type(cfg->uuid.data, &cfg->volume_type); - if (!result && cfg->volume_type == ATOMIC_DEVICE_OBJECT) - result = -KCAS_ERR_NVME_BAD_FORMAT; - if (OCF_ERR_NOT_OPEN_EXC == abs(result)) { - printk(KERN_WARNING OCF_PREFIX_SHORT - "Cannot open device %s exclusively. " - "It is already opened by another program!\n", - cmd_info->core_path_name); - } - - return result; -} - -int cache_mng_update_core_uuid(ocf_cache_t cache, ocf_core_id_t id, ocf_uuid_t uuid) -{ - ocf_core_t core; - ocf_volume_t vol; - struct block_device *bdev; - struct bd_object *bdvol; - bool match; - int result; - - if (ocf_core_get(cache, id, &core)) { - /* no such core */ - return -ENODEV; - } - - if (ocf_core_get_state(core) != ocf_core_state_active) { - /* core inactive */ - return -ENODEV; - } - - /* get bottom device volume for this core */ - vol = ocf_core_get_volume(core); - bdvol = bd_object(vol); - - /* lookup block device object for device pointed by uuid */ - bdev = LOOKUP_BDEV(uuid->data); - if (IS_ERR(bdev)) { - printk(KERN_ERR "failed to lookup bdev%s\n", (char*)uuid->data); - return -ENODEV; - } - - /* check whether both core id and uuid point to the same block device */ - match = (bdvol->btm_bd == bdev); - - bdput(bdev); - - if (match) { - result = ocf_core_set_uuid(core, uuid); - } else { - printk(KERN_ERR "UUID provided does not match target core device\n"); - result = -ENODEV; - } - - return result; -} - -static void _cache_mng_log_core_device_path(ocf_core_t core) -{ - ocf_cache_t cache = ocf_core_get_cache(core); - const ocf_uuid_t core_uuid = (const ocf_uuid_t)ocf_core_get_uuid(core); - - printk(KERN_INFO OCF_PREFIX_SHORT "Adding device %s as core %s " - "to cache %s\n", (const char*)core_uuid->data, - ocf_core_get_name(core), ocf_cache_get_name(cache)); -} - -static int _cache_mng_log_core_device_path_visitor(ocf_core_t core, void *cntx) -{ - _cache_mng_log_core_device_path(core); - - return 0; -} - -/************************************************************ - * Function for adding a CORE object to the cache instance. 
* - ************************************************************/ -int cache_mng_add_core_to_cache(struct ocf_mngt_core_config *cfg, - struct kcas_insert_core *cmd_info) -{ - int result; - ocf_cache_t cache; - ocf_core_t core; - ocf_core_id_t core_id; - - result = ocf_mngt_cache_get(cas_ctx, cfg->cache_id, &cache); - if (cfg->try_add && (result == -OCF_ERR_CACHE_NOT_EXIST)) { - result = ocf_mngt_core_pool_add(cas_ctx, &cfg->uuid, - cfg->volume_type); - if (result) { - cmd_info->ext_err_code = - -OCF_ERR_CANNOT_ADD_CORE_TO_POOL; - printk(KERN_ERR OCF_PREFIX_SHORT - "Error occurred during" - " adding core to detached core pool\n"); - } else { - printk(KERN_INFO OCF_PREFIX_SHORT - "Successfully added" - " core to core pool\n"); - } - return result; - } else if (result) { - return result; - } - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - if (cmd_info && cmd_info->update_path) { - result = cache_mng_update_core_uuid(cache, cfg->core_id, &cfg->uuid); - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; - } - - cfg->seq_cutoff_threshold = seq_cut_off_mb * MiB; - - result = ocf_mngt_cache_add_core(cache, &core, cfg); - if (result) - goto error_affter_lock; - - core_id = ocf_core_get_id(core); - - result = block_dev_create_exported_object(core); - if (result) - goto error_after_add_core; - - result = block_dev_activate_exported_object(core); - if (result) - goto error_after_create_exported_object; - - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - - if (cmd_info) - cmd_info->core_id = core_id; - - _cache_mng_log_core_device_path(core); - - return 0; - -error_after_create_exported_object: - block_dev_destroy_exported_object(core); - -error_after_add_core: - ocf_mngt_cache_remove_core(core); - -error_affter_lock: - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - - return result; -} - -/* Flush cache and destroy exported object */ -int _cache_mng_remove_core_prepare(ocf_cache_t cache, ocf_core_t core, - struct kcas_remove_core *cmd, bool destroy) -{ - int result = 0; - int flush_result = 0; - bool core_active; - bool flush_interruptible = !destroy; - - core_active = (ocf_core_get_state(core) == ocf_core_state_active); - - if (cmd->detach && !core_active) { - printk(KERN_WARNING OCF_PREFIX_SHORT - "Cannot detach core which " - "is already inactive!\n"); - return -OCF_ERR_CORE_IN_INACTIVE_STATE; - } - - if (core_active && destroy) { - result = block_dev_destroy_exported_object(core); - if (result) - return result; - } - - if (!cmd->force_no_flush) { - if (core_active) { - /* Flush core */ - flush_result = ocf_mngt_core_flush(core, - flush_interruptible); - } else { - printk(KERN_WARNING OCF_PREFIX_SHORT - "Cannot remove inactive core " - "without force option\n"); - return -OCF_ERR_CORE_IN_INACTIVE_STATE; - } - } - - if (flush_result) - result = destroy ? 
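/* Note: once the exported object has been destroyed (destroy == true) a flush
 * failure can no longer be rolled back, so it is converted to
 * -KCAS_ERR_REMOVED_DIRTY at this point; cache_mng_remove_core_from_cache()
 * below then detaches the core instead of removing it and reports the flush
 * status to the user afterwards. */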
-KCAS_ERR_REMOVED_DIRTY : flush_result; - - return result; -} - -/**************************************************************** - * Function for removing a CORE object from the cache instance - */ -int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd) -{ - int result, flush_result = 0; - ocf_cache_t cache; - ocf_core_t core; - - result = ocf_mngt_cache_get(cas_ctx, cmd->cache_id, &cache); - if (result) - return result; - - if (!cmd->force_no_flush) { - /* First check state and flush data (if requested by user) - under read lock */ - result = ocf_mngt_cache_read_lock(cache); - if (result) - goto put; - - result = ocf_core_get(cache, cmd->core_id, &core); - if (result < 0) - goto rd_unlock; - - result = _cache_mng_remove_core_prepare(cache, core, cmd, - false); - if (result) - goto rd_unlock; - - ocf_mngt_cache_read_unlock(cache); - } - - /* Acquire write lock */ - result = ocf_mngt_cache_lock(cache); - if (result) - goto put; - - result = ocf_core_get(cache, cmd->core_id, &core); - if (result < 0) { - goto unlock; - } - - /* - * Destroy exported object and flush core again but don't allow for - * interruption - in case of flush error after exported object had been - * destroyed, instead of trying rolling this back we rather detach core - * and then inform user about error. - */ - result = _cache_mng_remove_core_prepare(cache, core, cmd, true); - if (result == -KCAS_ERR_REMOVED_DIRTY) { - flush_result = result; - result = 0; - } else if (result) { - goto unlock; - } - - if (cmd->detach || flush_result) - result = ocf_mngt_cache_detach_core(core); - else - result = ocf_mngt_cache_remove_core(core); - - if (!result && flush_result) - result = flush_result; - -unlock: - ocf_mngt_cache_unlock(cache); -put: - ocf_mngt_cache_put(cache); - return result; - -rd_unlock: - ocf_mngt_cache_read_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_reset_core_stats(ocf_cache_id_t cache_id, - ocf_core_id_t core_id) -{ - ocf_cache_t cache; - ocf_core_t core; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_core_get(cache, core_id, &core); - if (result) - goto out; - - ocf_core_stats_initialize(core); - -out: - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return 0; -} - -static inline void io_class_info2cfg(ocf_part_id_t part_id, - struct ocf_io_class_info *info, struct ocf_mngt_io_class_config *cfg) -{ - cfg->class_id = part_id; - cfg->name = info->name; - cfg->prio = info->priority; - cfg->cache_mode = info->cache_mode; - cfg->min_size = info->min_size; - cfg->max_size = info->max_size; -} - -int cache_mng_set_partitions(struct kcas_io_classes *cfg) -{ - ocf_cache_t cache; - struct ocf_mngt_io_classes_config *io_class_cfg; - ocf_part_id_t class_id; - int result; - - io_class_cfg = kzalloc(sizeof(struct ocf_mngt_io_class_config) * - OCF_IO_CLASS_MAX, GFP_KERNEL); - if (!io_class_cfg) - return -OCF_ERR_NO_MEM; - - for (class_id = 0; class_id < OCF_IO_CLASS_MAX; class_id++) { - io_class_cfg->config[class_id].class_id = class_id; - - if (!cfg->info[class_id].name[0]) { - io_class_cfg->config[class_id].class_id = class_id; - continue; - } - - io_class_info2cfg(class_id, &cfg->info[class_id], - &io_class_cfg->config[class_id]); - } - - result = ocf_mngt_cache_get(cas_ctx, cfg->cache_id, &cache); - if (result) - goto err; - - result = ocf_mngt_cache_lock(cache); - if (result) { 
- ocf_mngt_cache_put(cache); - goto err; - } - - result = ocf_mngt_cache_io_classes_configure(cache, io_class_cfg); - - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - -err: - kfree(io_class_cfg); - - return result; -} - -static int _cache_mng_create_exported_object(ocf_core_t core, void *cntx) -{ - int result; - ocf_cache_t cache = ocf_core_get_cache(core); - - result = block_dev_create_exported_object(core); - if (result) { - printk(KERN_ERR "Cannot to create exported object, " - "cache id = %u, core id = %u\n", - ocf_cache_get_id(cache), - ocf_core_get_id(core)); - return result; - } - - result = block_dev_activate_exported_object(core); - if (result) { - printk(KERN_ERR "Cannot to activate exported object, " - "cache id = %u, core id = %u\n", - ocf_cache_get_id(cache), - ocf_core_get_id(core)); - } - - return result; -} - -static int _cache_mng_destroy_exported_object(ocf_core_t core, void *cntx) -{ - if (block_dev_destroy_exported_object(core)) { - ocf_cache_t cache = ocf_core_get_cache(core); - - printk(KERN_ERR "Cannot to destroy exported object, " - "cache id = %u, core id = %u\n", - ocf_cache_get_id(cache), - ocf_core_get_id(core)); - } - - return 0; -} - -static int cache_mng_initialize_core_objects(ocf_cache_t cache) -{ - int result; - - result = ocf_core_visit(cache, _cache_mng_create_exported_object, NULL, - true); - if (result) { - /* Need to cleanup */ - ocf_core_visit(cache, _cache_mng_destroy_exported_object, NULL, - true); - } - - return result; -} - -int cache_mng_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg, - struct ocf_mngt_cache_device_config *device_cfg, - struct kcas_start_cache *cmd) -{ - int init_cache, result; - struct atomic_dev_params atomic_params = { 0 }; - struct block_device *bdev; - int part_count; - char holder[] = "CAS START\n"; - bool is_part; - - if (strnlen(cmd->cache_path_name, MAX_STR_LEN) >= MAX_STR_LEN) - return -OCF_ERR_INVAL; - - memset(cfg, 0, sizeof(*cfg)); - - cfg->id = cmd->cache_id; - cfg->cache_mode = cmd->caching_mode; - cfg->cache_line_size = cmd->line_size; - cfg->eviction_policy = cmd->eviction_policy; - cfg->cache_line_size = cmd->line_size; - cfg->pt_unaligned_io = !unaligned_io; - cfg->use_submit_io_fast = !use_io_scheduler; - cfg->locked = true; - cfg->metadata_volatile = false; - cfg->metadata_layout = metadata_layout; - - cfg->backfill.max_queue_size = max_writeback_queue_size; - cfg->backfill.queue_unblock_size = writeback_queue_unblock_size; - - device_cfg->uuid.data = cmd->cache_path_name; - device_cfg->uuid.size = strnlen(device_cfg->uuid.data, MAX_STR_LEN) + 1; - device_cfg->cache_line_size = cmd->line_size; - device_cfg->force = cmd->force; - device_cfg->perform_test = true; - device_cfg->discard_on_start = true; - - init_cache = cmd->init_cache; - - switch (init_cache) { - case CACHE_INIT_NEW: - case CACHE_INIT_LOAD: - break; - default: - return -OCF_ERR_INVAL; - } - - bdev = OPEN_BDEV_EXCLUSIVE(device_cfg->uuid.data, FMODE_READ, holder); - if (IS_ERR(bdev)) { - return (PTR_ERR(bdev) == -EBUSY) ? 
- -OCF_ERR_NOT_OPEN_EXC : - -OCF_ERR_INVAL_VOLUME_TYPE; - } - - is_part = (bdev->bd_contains != bdev); - part_count = cas_blk_get_part_count(bdev); - CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ); - - if (!is_part && part_count > 1 && !device_cfg->force) - return -KCAS_ERR_CONTAINS_PART; - - result = cas_blk_identify_type_atomic(device_cfg->uuid.data, - &device_cfg->volume_type, &atomic_params); - if (result) - return result; - - cmd->metadata_mode_optimal = - block_dev_is_metadata_mode_optimal(&atomic_params, - device_cfg->volume_type); - - return 0; -} - -static void _cache_mng_log_cache_device_path(ocf_cache_t cache, - struct ocf_mngt_cache_device_config *device_cfg) -{ - printk(KERN_INFO OCF_PREFIX_SHORT "Adding device %s as cache %s\n", - (const char*)device_cfg->uuid.data, - ocf_cache_get_name(cache)); -} - -static void _cas_queue_kick(ocf_queue_t q) -{ - return cas_kick_queue_thread(q); -} - -static void _cas_queue_stop(ocf_queue_t q) -{ - return cas_stop_queue_thread(q); -} - - -const struct ocf_queue_ops queue_ops = { - .kick = _cas_queue_kick, - .stop = _cas_queue_stop, -}; - -static int _cache_mng_start_queues(ocf_cache_t cache) -{ - uint32_t queues_no = num_online_cpus(); - ocf_queue_t *queue_map; - int result, i; - - queue_map = kcalloc(queues_no, sizeof(*queue_map), GFP_KERNEL); - if (!queue_map) - return -ENOMEM; - - for (i = 0; i < queues_no; i++) { - result = ocf_queue_create(cache, &queue_map[i], &queue_ops); - if (result) - goto err; - - result = cas_create_queue_thread(queue_map[i], i); - if (result) { - ocf_queue_put(queue_map[i]); - goto err; - } - } - - ocf_cache_set_flush_queue(cache, queue_map[0]); - - ocf_cache_set_priv(cache, queue_map); - - return 0; -err: - while (--i >= 0) - ocf_queue_put(queue_map[i]); - - kfree(queue_map); - - return result; -} -struct _cache_mng_attach_context { - struct completion compl; - int *result; -}; - -static void _cache_mng_attach_complete(void *priv, int error) -{ - struct _cache_mng_attach_context *context = priv; - - *context->result = error; - complete(&context->compl); -} - -static int _cache_mng_start(struct ocf_mngt_cache_config *cfg, - struct ocf_mngt_cache_device_config *device_cfg, ocf_cache_t *cache) -{ - struct _cache_mng_attach_context context; - ocf_queue_t *queue_map; - ocf_cache_t tmp_cache; - int result; - - result = ocf_mngt_cache_start(cas_ctx, &tmp_cache, cfg); - if (result) - return result; - - result = _cache_mng_start_queues(tmp_cache); - if (result) - goto queues_err; - - init_completion(&context.compl); - context.result = &result; - - ocf_mngt_cache_attach(tmp_cache, device_cfg, - _cache_mng_attach_complete, &context); - - wait_for_completion(&context.compl); - if (result) - goto attach_err; - - _cache_mng_log_cache_device_path(tmp_cache, device_cfg); - - *cache = tmp_cache; - - return 0; - -attach_err: - queue_map = (ocf_queue_t *)ocf_cache_get_priv(tmp_cache); - kfree(queue_map); -queues_err: - ocf_mngt_cache_stop(tmp_cache); - ocf_mngt_cache_unlock(tmp_cache); - return result; -} - -struct _cache_mng_load_context { - struct completion compl; - int *result; -}; - -static void _cache_mng_load_complete(void *priv, int error) -{ - struct _cache_mng_load_context *context = priv; - - *context->result = error; - complete(&context->compl); -} - -static int _cache_mng_load(struct ocf_mngt_cache_config *cfg, - struct ocf_mngt_cache_device_config *device_cfg, ocf_cache_t *cache) -{ - struct _cache_mng_load_context context; - ocf_queue_t *queue_map; - ocf_cache_t tmp_cache; - int result; - - result = 
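/* A sketch of the async-to-sync bridge used by the attach path above and the
 * load path below: OCF reports completion via a callback while the caller
 * parks on a kernel completion (the names here are illustrative only):
 *
 *   struct bridge { struct completion compl; int *result; };
 *
 *   static void bridge_end(void *priv, int error)
 *   {
 *       struct bridge *b = priv;
 *
 *       *b->result = error;      // publish the OCF status to the waiter
 *       complete(&b->compl);     // wake the blocked management thread
 *   }
 *
 *   // usage, as in _cache_mng_start()/_cache_mng_load():
 *   init_completion(&b.compl);
 *   b.result = &result;
 *   ocf_mngt_cache_attach(tmp_cache, device_cfg, bridge_end, &b);
 *   wait_for_completion(&b.compl);
 */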
ocf_mngt_cache_start(cas_ctx, &tmp_cache, cfg); - if (result) - return result; - - result = _cache_mng_start_queues(tmp_cache); - if (result) - goto queues_err; - - init_completion(&context.compl); - context.result = &result; - - ocf_mngt_cache_load(tmp_cache, device_cfg, - _cache_mng_load_complete, &context); - - wait_for_completion(&context.compl); - if (result) - goto load_err; - - _cache_mng_log_cache_device_path(tmp_cache, device_cfg); - - result = cache_mng_initialize_core_objects(tmp_cache); - if (result) - goto load_err; - - ocf_core_visit(tmp_cache, _cache_mng_log_core_device_path_visitor, - NULL, false); - - *cache = tmp_cache; - - return 0; - -load_err: - queue_map = (ocf_queue_t *)ocf_cache_get_priv(tmp_cache); - kfree(queue_map); -queues_err: - ocf_mngt_cache_stop(*cache); - return result; -} - -int cache_mng_init_instance(struct ocf_mngt_cache_config *cfg, - struct ocf_mngt_cache_device_config *device_cfg, - struct kcas_start_cache *cmd) -{ - ocf_cache_t cache; - const char *name; - bool load = (cmd && cmd->init_cache == CACHE_INIT_LOAD); - int result; - - if (!try_module_get(THIS_MODULE)) - return -KCAS_ERR_SYSTEM; - - if (cmd) - cmd->min_free_ram = device_cfg->min_free_ram; - - /* Start cache. Returned cache instance will be locked as it was set - * in configuration. - */ - if (!load) - result = _cache_mng_start(cfg, device_cfg, &cache); - else - result = _cache_mng_load(cfg, device_cfg, &cache); - - if (result) { - module_put(THIS_MODULE); - return result; - } - - if (cmd) { - ocf_volume_t cache_obj = ocf_cache_get_volume(cache); - struct bd_object *bd_cache_obj = bd_object(cache_obj); - struct block_device *bdev = bd_cache_obj->btm_bd; - - /* If we deal with whole device, reread partitions */ - if (bdev->bd_contains == bdev) - ioctl_by_bdev(bdev, BLKRRPART, (unsigned long)NULL); - - /* Set other back information */ - name = block_dev_get_elevator_name( - casdsk_disk_get_queue(bd_cache_obj->dsk)); - if (name) - strlcpy(cmd->cache_elevator, - name, MAX_ELEVATOR_NAME); - } - - ocf_mngt_cache_unlock(cache); - - return 0; -} - -/** - * @brief routine implementing dynamic sequential cutoff parameter switching - * @param[in] cache_id cache id to which the change pertains - * @param[in] core_id core id to which the change pertains - * or OCF_CORE_ID_INVALID for setting value for all cores - * attached to specified cache - * @param[in] thresh new sequential cutoff threshold value - * @return exit code of successful completion is 0; - * nonzero exit code means failure - */ - -int cache_mng_set_seq_cutoff_threshold(ocf_cache_id_t cache_id, ocf_core_id_t core_id, - uint32_t thresh) -{ - ocf_cache_t cache; - ocf_core_t core; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - if (core_id != OCF_CORE_ID_INVALID) { - result = ocf_core_get(cache, core_id, &core); - if (result) - goto out; - result = ocf_mngt_core_set_seq_cutoff_threshold(core, thresh); - } else { - result = ocf_mngt_core_set_seq_cutoff_threshold_all(cache, - thresh); - } - -out: - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -/** - * @brief routine implementing dynamic sequential cutoff parameter switching - * @param[in] id cache id to which the change pertains - * @param[in] core_id core id to which the change pertains - * or OCF_CORE_ID_INVALID for setting value for all cores - * attached to specified cache - * @param[in] 
policy new sequential cutoff policy value - * @return exit code of successful completion is 0; - * nonzero exit code means failure - */ - -int cache_mng_set_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, - ocf_seq_cutoff_policy policy) -{ - ocf_cache_t cache; - ocf_core_t core; - int result; - - result = ocf_mngt_cache_get(cas_ctx, id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - if (core_id != OCF_CORE_ID_INVALID) { - result = ocf_core_get(cache, core_id, &core); - if (result) - goto out; - result = ocf_mngt_core_set_seq_cutoff_policy(core, policy); - } else { - result = ocf_mngt_core_set_seq_cutoff_policy_all(cache, policy); - } - -out: - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -/** - * @brief routine implementing dynamic sequential cutoff parameter switching - * @param[in] cache_id cache id to which the change pertains - * @param[in] core_id core id to which the change pertains - * or OCF_CORE_ID_INVALID for setting value for all cores - * attached to specified cache - * @param[out] thresh new sequential cutoff threshold value - * @return exit code of successful completion is 0; - * nonzero exit code means failure - */ - -int cache_mng_get_seq_cutoff_threshold(ocf_cache_id_t cache_id, - ocf_core_id_t core_id, uint32_t *thresh) -{ - ocf_cache_t cache; - ocf_core_t core; - int result; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_core_get(cache, core_id, &core); - if (result) - goto out; - - result = ocf_mngt_core_get_seq_cutoff_threshold(core, thresh); - -out: - ocf_mngt_cache_read_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -/** - * @brief routine implementing dynamic sequential cutoff parameter switching - * @param[in] id cache id to which the change pertains - * @param[in] core_id core id to which the change pertains - * or OCF_CORE_ID_INVALID for setting value for all cores - * attached to specified cache - * @param[out] policy new sequential cutoff policy value - * @return exit code of successful completion is 0; - * nonzero exit code means failure - */ - -int cache_mng_get_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id, - ocf_seq_cutoff_policy *policy) -{ - ocf_cache_t cache; - ocf_core_t core; - int result; - - result = ocf_mngt_cache_get(cas_ctx, id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_core_get(cache, core_id, &core); - if (result) - goto out; - - result = ocf_mngt_core_get_seq_cutoff_policy(core, policy); - -out: - ocf_mngt_cache_read_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -/** - * @brief routine implementing dynamic cache mode switching - * @param device caching device to which operation applies - * @param mode target mode (WRITE_THROUGH, WRITE_BACK, WRITE_AROUND etc.) - * @param flush shall we flush dirty data during switch, or shall we flush - * all remaining dirty data before entering new mode? 
- */ - -int cache_mng_set_cache_mode(ocf_cache_id_t id, ocf_cache_mode_t mode, - uint8_t flush) -{ - int result; - ocf_cache_t cache; - - result = ocf_mngt_cache_get(cas_ctx, id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - if (flush) { - result = ocf_mngt_cache_flush(cache, true); - if (result) - goto out; - } - - result = ocf_mngt_cache_set_mode(cache, mode, flush); - -out: - ocf_mngt_cache_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -/** - * @brief routine implements --remove-cache command. - * @param[in] device caching device to be removed - * @param[in] flush Boolean: shall we flush dirty data before removing cache. - * if yes, flushing may still be interrupted by user (in which case - * device won't be actually removed and error will be returned) - * @param[in] allow_interruption shall we allow interruption of dirty - * data flushing - */ -int cache_mng_exit_instance(ocf_cache_id_t id, int flush) -{ - ocf_cache_t cache; - ocf_queue_t *queue_map; - int status, flush_status = 0; - - /* Get cache */ - status = ocf_mngt_cache_get(cas_ctx, id, &cache); - if (status) - return status; - - queue_map = (ocf_queue_t *)ocf_cache_get_priv(cache); - - status = ocf_mngt_cache_read_lock(cache); - if (status) - goto put; - /* - * Flush cache. Flushing may take a long time, so we allow user - * to interrupt this operation. Hence we do first flush before - * disabling exported object to avoid restoring it in case - * of interruption. That means some new dirty data could appear - * in cache during flush operation which will not be flushed - * this time, so we need to flush cache again after disabling - * exported object. The second flush should be much faster. - */ - if (flush) { - status = ocf_mngt_cache_flush(cache, true); - switch (status) { - case -OCF_ERR_CACHE_IN_INCOMPLETE_STATE: - case -OCF_ERR_FLUSHING_INTERRUPTED: - ocf_mngt_cache_read_unlock(cache); - goto put; - default: - flush_status = status; - break; - } - } - - ocf_mngt_cache_read_unlock(cache); - - /* get cache write lock */ - status = ocf_mngt_cache_lock(cache); - if (status) - goto put; - - if (!cas_upgrade_is_in_upgrade()) { - /* If we are not in upgrade - destroy cache devices */ - status = block_dev_destroy_all_exported_objects(cache); - if (status != 0) { - printk(KERN_WARNING - "Failed to remove all cached devices\n"); - goto unlock; - } - } else { - if (flush_status) { - status = flush_status; - goto unlock; - } - /* - * We are being switched to upgrade in flight mode - - * wait for finishing pending core requests - */ - cache_mng_wait_for_rq_finish(cache); - } - - /* Flush cache again. This time we don't allow interruption. 
*/ - if (flush) - flush_status = ocf_mngt_cache_flush(cache, false); - - /* Stop cache device */ - status = ocf_mngt_cache_stop(cache); - - if (!status && flush_status) - status = -KCAS_ERR_STOPPED_DIRTY; - - module_put(THIS_MODULE); - - kfree(queue_map); - -unlock: - ocf_mngt_cache_unlock(cache); -put: - ocf_mngt_cache_put(cache); - return status; -} - -static int cache_mng_list_caches_visitor(ocf_cache_t cache, void *cntx) -{ - ocf_cache_id_t id = ocf_cache_get_id(cache); - struct kcas_cache_list *list = cntx; - - if (list->id_position >= id) - return 0; - - if (list->in_out_num >= ARRAY_SIZE(list->cache_id_tab)) - return 1; - - list->cache_id_tab[list->in_out_num] = id; - list->in_out_num++; - - return 0; -} - -int cache_mng_list_caches(struct kcas_cache_list *list) -{ - list->in_out_num = 0; - return ocf_mngt_cache_visit(cas_ctx, cache_mng_list_caches_visitor, list); -} - -int cache_mng_interrupt_flushing(ocf_cache_id_t id) -{ - int result; - ocf_cache_t cache; - - result = ocf_mngt_cache_get(cas_ctx, id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_flush_interrupt(cache); - - ocf_mngt_cache_put(cache); - - return result; - -} - -int cache_mng_get_info(struct kcas_cache_info *info) -{ - uint32_t i, j; - int result; - ocf_cache_t cache; - ocf_core_t core; - const struct ocf_volume_uuid *uuid; - - result = ocf_mngt_cache_get(cas_ctx, info->cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if (result) - goto put; - - result = ocf_cache_get_info(cache, &info->info); - if (result) - goto unlock; - - if (info->info.attached) { - uuid = ocf_cache_get_uuid(cache); - strlcpy(info->cache_path_name, uuid->data, - min(sizeof(info->cache_path_name), uuid->size)); - - switch (info->info.volume_type) { - case BLOCK_DEVICE_OBJECT: - info->metadata_mode = CAS_METADATA_MODE_NORMAL; - break; - case ATOMIC_DEVICE_OBJECT: - info->metadata_mode = CAS_METADATA_MODE_ATOMIC; - break; - default: - info->metadata_mode = CAS_METADATA_MODE_INVALID; - break; - } - } - - /* Collect cores IDs */ - for (i = 0, j = 0; j < info->info.core_count && - i < OCF_CORE_MAX; i++) { - if (ocf_core_get(cache, i, &core)) - continue; - - info->core_id[j] = i; - j++; - } - -unlock: - ocf_mngt_cache_read_unlock(cache); -put: - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_get_io_class_info(struct kcas_io_class *part) -{ - int result; - ocf_cache_id_t cache_id = part->cache_id; - ocf_core_id_t core_id = part->core_id; - uint32_t io_class_id = part->class_id; - ocf_cache_t cache; - ocf_core_t core; - - result = ocf_mngt_cache_get(cas_ctx, cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if (result) { - ocf_mngt_cache_put(cache); - return result; - } - - result = ocf_cache_io_class_get_info(cache, io_class_id, &part->info); - if (result) - goto end; - - if (part->get_stats) { - result = ocf_core_get(cache, core_id, &core); - if (result < 0) { - result = OCF_ERR_CORE_NOT_AVAIL; - goto end; - } - - result = ocf_core_io_class_get_stats(core, io_class_id, - &part->stats); - } - -end: - ocf_mngt_cache_read_unlock(cache); - ocf_mngt_cache_put(cache); - return result; -} - -int cache_mng_get_core_info(struct kcas_core_info *info) -{ - ocf_cache_t cache; - ocf_core_t core; - const struct ocf_volume_uuid *uuid; - int result; - - result = ocf_mngt_cache_get(cas_ctx, info->cache_id, &cache); - if (result) - return result; - - result = ocf_mngt_cache_read_lock(cache); - if(result) - goto put; - - result = 
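/* Note on cache_mng_list_caches_visitor() above: the id_position /
 * in_out_num pair appears to implement paged listing; caches with
 * id <= list->id_position are skipped and the walk stops (returns 1) once
 * cache_id_tab is full, so userspace can resume from the last id it saw. */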
ocf_core_get(cache, info->core_id, &core); - if (result < 0) { - result = OCF_ERR_CORE_NOT_AVAIL; - goto unlock; - } - - result = ocf_core_get_stats(core, &info->stats); - if (result) - goto unlock; - - uuid = ocf_core_get_uuid(core); - - strlcpy(info->core_path_name, uuid->data, - min(sizeof(info->core_path_name), uuid->size)); - - info->state = ocf_core_get_state(core); - -unlock: - ocf_mngt_cache_read_unlock(cache); -put: - ocf_mngt_cache_put(cache); - return result; -} - -static int cache_mng_wait_for_rq_finish_visitor(ocf_core_t core, void *cntx) -{ - ocf_volume_t obj = ocf_core_get_volume(core); - struct bd_object *bdobj = bd_object(obj); - - while (atomic64_read(&bdobj->pending_rqs)) - io_schedule(); - - return 0; -} - -void cache_mng_wait_for_rq_finish(ocf_cache_t cache) -{ - ocf_core_visit(cache, cache_mng_wait_for_rq_finish_visitor, NULL, true); -} - -int cache_mng_set_core_params(struct kcas_set_core_param *info) -{ - switch (info->param_id) { - case core_param_seq_cutoff_threshold: - return cache_mng_set_seq_cutoff_threshold(info->cache_id, - info->core_id, info->param_value); - case core_param_seq_cutoff_policy: - return cache_mng_set_seq_cutoff_policy(info->cache_id, - info->core_id, info->param_value); - default: - return -EINVAL; - } -} - -int cache_mng_get_core_params(struct kcas_get_core_param *info) -{ - switch (info->param_id) { - case core_param_seq_cutoff_threshold: - return cache_mng_get_seq_cutoff_threshold(info->cache_id, - info->core_id, &info->param_value); - case core_param_seq_cutoff_policy: - return cache_mng_get_seq_cutoff_policy(info->cache_id, - info->core_id, &info->param_value); - default: - return -EINVAL; - } -} - -int cache_mng_set_cache_params(struct kcas_set_cache_param *info) -{ - switch (info->param_id) { - case cache_param_cleaning_policy_type: - return cache_mng_set_cleaning_policy(info->cache_id, - info->param_value); - - case cache_param_cleaning_alru_wake_up_time: - return cache_mng_set_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_wake_up_time, - info->param_value); - case cache_param_cleaning_alru_stale_buffer_time: - return cache_mng_set_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_stale_buffer_time, - info->param_value); - case cache_param_cleaning_alru_flush_max_buffers: - return cache_mng_set_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_flush_max_buffers, - info->param_value); - case cache_param_cleaning_alru_activity_threshold: - return cache_mng_set_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_activity_threshold, - info->param_value); - - case cache_param_cleaning_acp_wake_up_time: - return cache_mng_set_cleaning_param(info->cache_id, - ocf_cleaning_acp, ocf_acp_wake_up_time, - info->param_value); - case cache_param_cleaning_acp_flush_max_buffers: - return cache_mng_set_cleaning_param(info->cache_id, - ocf_cleaning_acp, ocf_acp_flush_max_buffers, - info->param_value); - default: - return -EINVAL; - } -} - -int cache_mng_get_cache_params(struct kcas_get_cache_param *info) -{ - switch (info->param_id) { - case cache_param_cleaning_policy_type: - return cache_mng_get_cleaning_policy(info->cache_id, - &info->param_value); - - case cache_param_cleaning_alru_wake_up_time: - return cache_mng_get_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_wake_up_time, - &info->param_value); - case cache_param_cleaning_alru_stale_buffer_time: - return cache_mng_get_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_stale_buffer_time, - &info->param_value); - case 
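/* Usage illustration for the parameter dispatchers above (hypothetical
 * values):
 *
 *   struct kcas_set_cache_param info = {
 *       .cache_id    = 1,
 *       .param_id    = cache_param_cleaning_alru_flush_max_buffers,
 *       .param_value = 100,
 *   };
 *   cache_mng_set_cache_params(&info);
 *
 * resolves to cache_mng_set_cleaning_param(1, ocf_cleaning_alru,
 * ocf_alru_flush_max_buffers, 100); the getter switch here mirrors it with
 * the *param_value out-parameter. */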
cache_param_cleaning_alru_flush_max_buffers: - return cache_mng_get_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_flush_max_buffers, - &info->param_value); - case cache_param_cleaning_alru_activity_threshold: - return cache_mng_get_cleaning_param(info->cache_id, - ocf_cleaning_alru, ocf_alru_activity_threshold, - &info->param_value); - - case cache_param_cleaning_acp_wake_up_time: - return cache_mng_get_cleaning_param(info->cache_id, - ocf_cleaning_acp, ocf_acp_wake_up_time, - &info->param_value); - case cache_param_cleaning_acp_flush_max_buffers: - return cache_mng_get_cleaning_param(info->cache_id, - ocf_cleaning_acp, ocf_acp_flush_max_buffers, - &info->param_value); - default: - return -EINVAL; - } -} diff --git a/modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig b/modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig deleted file mode 100644 index 2e3a81e21..000000000 --- a/modules/cas_cache/volume/vol_atomic_dev_bottom.c.orig +++ /dev/null @@ -1,1217 +0,0 @@ -/* -* Copyright(c) 2012-2019 Intel Corporation -* SPDX-License-Identifier: BSD-3-Clause-Clear -*/ - -#include "cas_cache.h" -#if defined(CAS_NVME_FULL) - -#include -#include - -#define CAS_DEBUG_IO_ATOMIC 0 - -#if 1 == CAS_DEBUG_IO_ATOMIC -#define CAS_DEBUG_TRACE() printk(KERN_DEBUG \ - "[IO][ATOMIC] %s:%d\n", __func__, __LINE__) - -#define CAS_DEBUG_MSG(msg) printk(KERN_DEBUG \ - "[IO][ATOMIC] %s:%d - %s\n", __func__, __LINE__, msg) - -#define CAS_DEBUG_PARAM(format, ...) printk(KERN_DEBUG \ - "[IO][ATOMIC] %s:%d - "format"\n", __func__, \ - __LINE__, ##__VA_ARGS__) -#else -#define CAS_DEBUG_TRACE() -#define CAS_DEBUG_MSG(msg) -#define CAS_DEBUG_PARAM(format, ...) -#endif - -#define ADMIN_TIMEOUT (60 * HZ) - -struct cas_atomic_io { - ocf_volume_t volume; - - struct cas_atomic_io *master; - atomic_t req_remaining; - atomic_t potential_dirty; - uint32_t count; - - uint64_t addr; - uint32_t bytes; - uint32_t start; - uint32_t end; - - int error; - unsigned dir:1; - unsigned metadata:1; - unsigned discard:1; - unsigned long flags; - - ocf_end_io_t cmpl_fn; - void *cmpl_context; - - struct blk_data *data; - uint32_t bvec_size; - - struct nvme_command cmd; - struct bio *bio; - struct request *request; - - struct bio_vec_iter iter; -}; - -static struct ocf_mpool *atomic_io_allocator; - -static inline uint32_t cas_atomic_max_io_sectors(void) -{ - /* TODO Take into account max IO size of bottom device */ - return 128 * KiB / (SECTOR_SIZE + OCF_ATOMIC_METADATA_SIZE); -} - -static inline uint32_t cas_atomic_size_of(uint32_t size) -{ - BUG_ON(size % SECTOR_SIZE); - return size + (size / SECTOR_SIZE * OCF_ATOMIC_METADATA_SIZE); -} - -static void cas_atomic_dealloc(struct cas_atomic_io *atomics) -{ - uint32_t i; - - for (i = 0; i < atomics->count; i++) { - struct cas_atomic_io *this = &atomics[i]; - - if (this->request && !IS_ERR(this->request)) { - blk_mq_free_request(this->request); - this->request = NULL; - } - - if (this->bio) - bio_put(this->bio); - - if (this->data) { - cas_ctx_data_secure_erase(this->data); - cas_ctx_data_free(this->data); - } - } - - ocf_mpool_del(atomic_io_allocator, atomics, atomics->count); -} - -static struct cas_atomic_io *cas_atomic_alloc(int dir, struct ocf_io *io, bool write_zero) -{ - /* Get max size of IO */ - const uint32_t max_io_size = cas_atomic_max_io_sectors() - * SECTOR_SIZE; - - /* Get number of IOs to be issued */ - uint32_t ios_count; - ocf_cache_t cache = ocf_volume_get_cache(io->volume); - - uint64_t addr = io->addr; - uint32_t i, bytes = io->bytes; - uint32_t 
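/* Worked sizing example, assuming 512 B sectors and an 8 B per-sector
 * OCF_ATOMIC_METADATA_SIZE (the 8 B figure is an assumption for
 * illustration, not asserted by this file):
 *
 *   cas_atomic_size_of(4096)    = 4096 + (4096 / 512) * 8 = 4160 B
 *   cas_atomic_max_io_sectors() = 128 KiB / (512 + 8)     = 252 sectors
 *
 * so each sub-atomic IO carries at most 252 sectors and
 * ios_count = DIV_ROUND_UP(bytes, max_io_size). */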
increase_sectors_start = 0, increase_sectors_end = 0; - struct cas_atomic_io *atoms; - - if (dir == OCF_WRITE && !write_zero) { - /* TODO: this logic is probably no longer required */ - BUG_ON(!cache); - increase_sectors_start = - ocf_metadata_check_invalid_before(cache, addr); - - increase_sectors_end = - ocf_metadata_check_invalid_after(cache, addr, - io->bytes); - increase_sectors_start *= 512; - increase_sectors_end *= 512; - - if (increase_sectors_start) { - bytes += increase_sectors_start; - addr -= increase_sectors_start; - } - - if (increase_sectors_end) - bytes += increase_sectors_end; - } - - /* Get number of IOs to be issued */ - ios_count = DIV_ROUND_UP(bytes, max_io_size); - - atoms = ocf_mpool_new(atomic_io_allocator, - ios_count); - if (!atoms) - return NULL; - - CAS_DEBUG_PARAM("Addr = %llu, bytes = %u", io->addr, io->bytes); - - /* setup master IO */ - atomic_set(&atoms->req_remaining, ios_count); - - atoms->count = ios_count; - atoms->cmpl_fn = io->end; - atoms->cmpl_context = io; - - for (i = 0; i < ios_count; i++) { - struct cas_atomic_io *this = &atoms[i]; - - this->master = atoms; - this->addr = addr; - this->bytes = min(bytes, max_io_size); - this->dir = dir; - this->flags = io->flags; - this->volume = io->volume; - - CAS_DEBUG_PARAM("Sub-atomic IO (%u), Addr = %llu, bytes = %u", - i, this->addr, this->bytes); - - addr += this->bytes; - bytes -= this->bytes; - - /* Allocate BIO data vector with pages */ - this->bvec_size = cas_atomic_size_of(this->bytes); - this->bvec_size = DIV_ROUND_UP(this->bvec_size, PAGE_SIZE); - - if (write_zero || increase_sectors_start || - increase_sectors_end) - this->data = cas_ctx_data_zalloc(this->bvec_size); - else - this->data = cas_ctx_data_alloc(this->bvec_size); - - if (!this->data) - goto cas_atomic_alloc_ERROR; - - /* Set length of last page */ - this->data->vec[this->bvec_size - 1].bv_len = - cas_atomic_size_of(this->bytes) % PAGE_SIZE; - - CAS_DEBUG_PARAM("Sub-atomic IO (%u), BIO vector size = %u, " - "alignment %u", i, this->bvec_size, - this->data->vec[this->bvec_size - 1].bv_len); - - this->start = min(this->bytes, increase_sectors_start); - increase_sectors_start -= this->start; - } - BUG_ON(bytes); - - for (i = ios_count; i && increase_sectors_end; i--) { - struct cas_atomic_io *this = &atoms[i - 1]; - - this->end = min(this->bytes, increase_sectors_end); - increase_sectors_end -= this->end; - } - - return atoms; - -cas_atomic_alloc_ERROR: - - if (atoms) - cas_atomic_dealloc(atoms); - - return NULL; -} - -static int cas_atomic_rd_complete(struct cas_atomic_io *atom) -{ - struct bio_vec_iter *dst, src; - uint32_t copied; - const uint32_t size = OCF_ATOMIC_METADATA_SIZE; - - uint32_t bytes = atom->bytes; - - CAS_DEBUG_TRACE(); - - /* Initialize iterators */ - cas_io_iter_init(&src, atom->data->vec, atom->bvec_size); - dst = &atom->iter; - - BUG_ON(bytes % SECTOR_SIZE); - BUG_ON(size != OCF_ATOMIC_METADATA_SIZE); - - copied = 0; - while (bytes) { - /* Copy data */ - copied += cas_io_iter_cpy(dst, &src, SECTOR_SIZE); - - /* Omit metadata */ - copied += cas_io_iter_move(&src, size); - - bytes -= SECTOR_SIZE; - } - - /* Validate if copied proper numbers of bytes */ - if (copied != cas_atomic_size_of(atom->bytes)) { - CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", - copied, cas_atomic_size_of(atom->bytes)); - /* Metadata and data coping problem */ - return -EIO; - } - - return 0; -} - -static int cas_atomic_rd_metadata_complete(struct cas_atomic_io *atom) -{ - struct bio_vec_iter *dst, src; - uint32_t copied; - const uint32_t 
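/* Layout handled by these completion helpers: data and per-sector metadata
 * are interleaved on the atomic namespace,
 *
 *   | 512 B data | meta | 512 B data | meta | ...
 *
 * cas_atomic_rd_complete() above keeps the data and skips the metadata;
 * cas_atomic_rd_metadata_complete() here keeps only the metadata entries. */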
size = OCF_ATOMIC_METADATA_SIZE; - - uint32_t bytes = atom->bytes; - - CAS_DEBUG_TRACE(); - - /* Initialize iterators */ - cas_io_iter_init(&src, atom->data->vec, atom->bvec_size); - dst = &atom->iter; - - BUG_ON(bytes % SECTOR_SIZE); - BUG_ON(size != OCF_ATOMIC_METADATA_SIZE); - - copied = 0; - while (bytes) { - /* Copy data */ - copied += cas_io_iter_move(&src, SECTOR_SIZE); - - /* Omit metadata */ - copied += cas_io_iter_cpy(dst, &src, size); - - bytes -= SECTOR_SIZE; - } - - /* Validate if copied proper numbers of bytes */ - if (copied != cas_atomic_size_of(atom->bytes)) { - CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", - copied, cas_atomic_size_of(atom->bytes)); - /* Metadata and data coping problem */ - return -EIO; - } - - return 0; -} - -static int cas_atomic_rd_prepare(struct ocf_io *io, - struct cas_atomic_io *atom) -{ - struct blkio *blkio = cas_io_to_blkio(io); - uint32_t moved; - - /* Store BIO vector iterator, when read completed then it will be - * known were place data - */ - cas_io_iter_copy_set(&atom->iter, &blkio->iter); - - /* Move iterator for next IO */ - moved = cas_io_iter_move(&blkio->iter, atom->bytes); - - /* Validate if there is enough space in BIO data vector to do read */ - if (moved != atom->bytes) { - CAS_DEBUG_PARAM("ERROR, moved %u, expected = %u", - moved, cas_atomic_size_of(atom->bytes)); - return -EINVAL; - } - - return 0; -} - -static int cas_atomic_wr_prepare(struct ocf_io *io, - struct cas_atomic_io *atom) -{ - struct blkio *blkio = cas_io_to_blkio(io); - ocf_cache_t cache; - struct ocf_atomic_metadata metadata; - struct bio_vec_iter dst, src; - uint32_t copied, added; - - uint64_t addr = atom->addr; - uint32_t bytes = atom->bytes; - - cache = ocf_volume_get_cache(io->volume); - - /* Initialize iterators */ - cas_io_iter_init(&dst, atom->data->vec, atom->bvec_size); - cas_io_iter_copy_set(&src, &blkio->iter); - - BUG_ON(!cache); - BUG_ON(bytes % SECTOR_SIZE); - - copied = 0; - if (atom->start) { - added = cas_atomic_size_of(atom->start); - cas_io_iter_move(&dst, added); - - bytes -= atom->start; - copied = added; - - addr += atom->start; - } - - if (atom->end) { - added = cas_atomic_size_of(atom->end); - bytes -= atom->end; - copied += added; - } - - BUG_ON(sizeof(metadata) != OCF_ATOMIC_METADATA_SIZE); - - while (bytes) { - /* Get metadata */ - if (ocf_metadata_get_atomic_entry(cache, addr, &metadata)) - break; - - /* Copy data */ - copied += cas_io_iter_cpy(&dst, &src, SECTOR_SIZE); - - /* Copy metadata */ - copied += cas_io_iter_cpy_from_data(&dst, &metadata, - sizeof(metadata)); - - bytes -= SECTOR_SIZE; - addr += SECTOR_SIZE; - } - - cas_io_iter_move(&blkio->iter, atom->bytes - (atom->start + atom->end)); - - /* Validate if copied proper numbers of bytes */ - if (copied != cas_atomic_size_of(atom->bytes)) { - CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", - copied, cas_atomic_size_of(atom->bytes)); - /* Metadata and data coping problem */ - return -EINVAL; - } - - return 0; -} - -static int cas_atomic_rd_metadata_prepare(struct ocf_io *io, - struct cas_atomic_io *atom) -{ - struct blkio *blkio = cas_io_to_blkio(io); - uint32_t moved; - - BUG_ON(io->dir != OCF_READ); - - atom->metadata = true; - - /* Store BIO vector iterator, when read completed then it will be - * known were place data - */ - cas_io_iter_copy_set(&atom->iter, &blkio->iter); - - /* Move iterator for next IO */ - moved = cas_io_iter_move(&blkio->iter, (atom->bytes / SECTOR_SIZE) * - OCF_ATOMIC_METADATA_SIZE); - - /* Validate if copied proper numbers of bytes */ - if 
(moved != (atom->bytes / SECTOR_SIZE) * - OCF_ATOMIC_METADATA_SIZE) { - CAS_DEBUG_PARAM("ERROR, copied %u, expected = %u", - moved, cas_atomic_size_of(atom->bytes)); - /* Metadata and data coping problem */ - return -EIO; - } - - return 0; -} - -static void cas_atomic_end_atom(struct cas_atomic_io *atom, int error) -{ - struct cas_atomic_io *master = atom->master; - struct ocf_io *io = master->cmpl_context; - - if (error) - master->error |= error; - - if (atomic_dec_return(&master->req_remaining)) - return; - - CAS_DEBUG_MSG("Completion"); - - /* Send completion to caller */ - master->cmpl_fn(io, master->error); - - /* Free allocated structures */ - cas_atomic_dealloc(master); - ocf_io_put(io); -} - -static DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio, - unsigned int bytes, int error) -{ - int err; - struct cas_atomic_io *atom; - struct bd_object *bdobj; - - BUG_ON(!bio); - BUG_ON(!bio->bi_private); - err = BLOCK_CALLBACK_ERROR(bio, error); - atom = bio->bi_private; - BUG_ON(!atom->master); - bdobj = bd_object(atom->volume); - - CAS_DEBUG_PARAM("BIO result = %d", BLOCK_CALLBACK_ERROR(bio, error)); - - if (err != 0) - goto out; - - if (atom->discard) - goto out; - - if (atom->metadata) { - if (cas_atomic_rd_metadata_complete(atom)) - atom->master->error = -EIO; - goto out; - } - - switch (atom->dir) { - case OCF_READ: - if (cas_atomic_rd_complete(atom)) - atom->master->error = -EIO; - break; - case OCF_WRITE: - if (!cas_blk_is_flush_io(atom->flags)) { - atomic_inc(&bdobj->potentially_dirty); - } else { - /* IO flush finished, update potential - * dirty state - */ - atomic_sub(atomic_read(&atom->potential_dirty), - &bdobj->potentially_dirty); - } - break; - } - -out: - /* Free BIO, no needed any more */ - BUG_ON(bio != atom->bio); - bio_put(bio); - atom->bio = NULL; - - cas_atomic_end_atom(atom, err); -} - -static void _cas_atomic_setup_cmd( - ocf_volume_t volume, - struct request *req, - struct bio* bio, - uint64_t bytes, - int dir, - void *end_io_data, - struct nvme_command *cmd) -{ - struct bd_object *bdobj = bd_object(volume); - unsigned int ns_id = bdobj->atomic_params.nsid; - unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); - - cmd->rw.opcode = (dir == OCF_WRITE) ? 
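/* Worked example for the command fields set below: a 4 KiB write at byte
 * offset 1 MiB yields
 *
 *   slba   = (1 MiB) / 512     = 2048
 *   length = (4096 / 512) - 1  = 7    // NVMe block counts are zero-based
 *
 * and control = NVME_RW_LR requests limited-retry handling. */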
nvme_cmd_write : nvme_cmd_read; - cmd->rw.nsid = cpu_to_le32(ns_id); - cmd->rw.slba = cpu_to_le64(BIO_BISECTOR(bio)); - cmd->rw.length = cpu_to_le16((bytes / SECTOR_SIZE) - 1); - cmd->rw.control = cpu_to_le16(NVME_RW_LR); - - req->cmd_type = REQ_TYPE_DRV_PRIV; - req->cmd_flags |= REQ_FAILFAST_DRIVER; - - *cmd_addr = (unsigned long)cmd; - - req->timeout = ADMIN_TIMEOUT; /* TODO Use timeout for regular IO */ - - req->cmd = (unsigned char *) cmd; - req->cmd_len = sizeof(*cmd); - req->special = NULL; - req->end_io_data = end_io_data; -} - -static void cas_atomic_setup_cmd(int dir, struct cas_atomic_io *atom) -{ - _cas_atomic_setup_cmd(atom->volume, atom->request, atom->bio, - atom->bytes, dir, atom, &atom->cmd); -} - -static void cas_atomic_end_request(struct request *request, int error) -{ - struct cas_atomic_io *atom; - - BUG_ON(!request); - atom = request->end_io_data; - - /* Free request not needed any more */ - BUG_ON(atom->request != request); - blk_mq_free_request(request); - atom->request = NULL; - - CAS_DEBUG_PARAM("RQ result = %d", error); - - cas_atomic_end_atom(atom, error); -} - -static void cas_atomic_fire_atom(int dir, struct ocf_io *io, - struct cas_atomic_io *atom) -{ - struct bd_object *bdobj = bd_object(atom->volume); - struct block_device *bdev = bdobj->btm_bd; - struct request_queue *queue = bdev_get_queue(bdev); - - struct bio *bio; - struct bio_vec *bvec; - uint32_t i; - - /* Allocate BIO */ - bio = atom->bio = bio_alloc(GFP_NOIO, atom->bvec_size); - if (!bio) - goto _fire_atom_ERROR; - - /* Setup BIO */ - bio->bi_bdev = bdev; - BIO_BISECTOR(bio) = atom->addr / SECTOR_SIZE; - bio->bi_next = NULL; - bio->bi_private = atom; - BIO_OP_FLAGS(bio) |= io->flags; - bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom); - - /* Add pages to the BIO */ - bvec = atom->data->vec; - for (i = 0; i < atom->bvec_size; i++, bvec++) { - int added = bio_add_pc_page(queue, bio, - bvec->bv_page, bvec->bv_len, bvec->bv_offset); - - if (added != bvec->bv_len) { - /* Oops, a problem, cannot add page to the BIO */ - goto _fire_atom_ERROR; - } - } - - /* Allocate request */ - atom->request = cas_blk_make_request(queue, atom->bio, GFP_NOIO); - if (IS_ERR(atom->request)) { - atom->request = NULL; - goto _fire_atom_ERROR; - } - - /* Setup command */ - cas_atomic_setup_cmd(dir, atom); - - /* Additional completion for request */ - atomic_inc(&atom->master->req_remaining); - - /* Send requests (NVME atomic command) */ - blk_execute_rq_nowait(queue, NULL, atom->request, 0, - cas_atomic_end_request); - - return; - -_fire_atom_ERROR: - CAS_DEBUG_MSG("ERROR"); - cas_atomic_end_atom(atom, -EIO); -} - -static void cas_atomic_fire_atoms(int dir, struct ocf_io *io, - struct cas_atomic_io *atoms) -{ - uint32_t i; - - ocf_io_get(io); - - for (i = 0; i < atoms->count; i++) { - struct cas_atomic_io *this = &atoms[i]; - - CAS_DEBUG_PARAM("Fire(%u), Addr = %llu, bytes = %u", - i, this->addr, this->bytes); - cas_atomic_fire_atom(dir, io, this); - } -} - -typedef int (*cas_prepare_atom_pfn_t)(struct ocf_io *io, - struct cas_atomic_io *atoms); - -static int cas_atomic_prepare_atoms(struct ocf_io *io, - cas_prepare_atom_pfn_t prepare, - struct cas_atomic_io *atoms) -{ - int i; - int result = 0; - - if (!prepare) - return 0; - - for (i = 0; i < atoms->count; i++) { - struct cas_atomic_io *this = &atoms[i]; - - CAS_DEBUG_PARAM("Sub-atomic IO preparation(%u), Addr = %llu, " - "bytes = %u, dir = %d", i, this->addr, - this->bytes, dir); - - result |= prepare(io, this); - } - - return result; -} - -static void 
cas_atomic_fire_io(struct ocf_io *io, - cas_prepare_atom_pfn_t prepare, - bool write_zero) -{ - int dir = io->dir; - - /* Create atomic IOs context, mainly allocations */ - struct cas_atomic_io *atoms = cas_atomic_alloc(dir, io, write_zero); - - if (!atoms) { - CAS_DEBUG_MSG("Memory allocation ERROR"); - goto _submit_io_ERROR; - } - - /* Prepare IOs, mainly coping data */ - if (cas_atomic_prepare_atoms(io, prepare, atoms)) { - CAS_DEBUG_MSG("Preparation ERROR"); - goto _submit_io_ERROR; - } - - /* Send IO */ - atomic_inc(&atoms->req_remaining); - cas_atomic_fire_atoms(dir, io, atoms); - cas_atomic_end_atom(atoms, 0); - - return; - -_submit_io_ERROR: - if (atoms) - cas_atomic_dealloc(atoms); - - io->end(io, -EIO); -} - -static void cas_atomic_submit_flush_bio(struct cas_atomic_io *atom) -{ - struct request *req = atom->request; - struct bd_object *bdobj = bd_object(atom->volume); - unsigned int ns_id = bdobj->atomic_params.nsid; - struct nvme_command *cmd = &atom->cmd; - unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); - - cmd->rw.opcode = nvme_cmd_flush; - cmd->rw.nsid = cpu_to_le32(ns_id); - - *cmd_addr = (unsigned long)cmd; - - req->cmd_type = REQ_TYPE_DRV_PRIV; - - req->timeout = ADMIN_TIMEOUT; - - req->cmd = (unsigned char *) cmd; - req->cmd_len = sizeof(*cmd); - req->special = NULL; - req->end_io_data = atom; - - /* Additional completion for request */ - atomic_inc(&atom->master->req_remaining); - - /* Send NVMe flush command */ - blk_execute_rq_nowait(req->q, NULL, req, 0, cas_atomic_end_request); -} - -static int cas_atomic_submit_discard_bio(struct cas_atomic_io *atom) -{ - struct request *req = atom->request; - struct nvme_command *cmd = &atom->cmd; - struct bd_object *bdobj = bd_object(atom->volume); - unsigned int ns_id = bdobj->atomic_params.nsid; - struct nvme_dsm_range *nvm_discard; - struct page *page; - int offset; - unsigned long *cmd_addr = blk_mq_rq_to_pdu(req); - - nvm_discard = kmalloc(sizeof(*nvm_discard), GFP_ATOMIC); - if (!nvm_discard) { - return -ENOMEM; - } - - nvm_discard->cattr = cpu_to_le32(0); - nvm_discard->nlb = cpu_to_le32(BIO_BISIZE(atom->bio) >> SECTOR_SHIFT); - nvm_discard->slba = cpu_to_le64(BIO_BISECTOR(atom->bio)); - - cmd->dsm.opcode = nvme_cmd_dsm; - cmd->dsm.nsid = cpu_to_le32(ns_id); - cmd->dsm.nr = 0; - cmd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); - - req->completion_data = nvm_discard; - page = virt_to_page(nvm_discard); - offset = offset_in_page(nvm_discard); - blk_add_request_payload(req, page, offset, sizeof(*nvm_discard)); - - req->__sector = BIO_BISECTOR(atom->bio); - req->__data_len = BIO_BISIZE(atom->bio); - req->ioprio = bio_prio(atom->bio); - - req->timeout = ADMIN_TIMEOUT; - req->end_io_data = atom; - req->cmd_type = REQ_TYPE_DRV_PRIV; - req->cmd_flags = CAS_BIO_DISCARD; - - req->errors = 0; - - *cmd_addr = (unsigned long)cmd; - - /* Additional completion for request */ - atomic_inc(&atom->master->req_remaining); - - /* Send NVMe flush command */ - blk_execute_rq_nowait(req->q, NULL, req, 0, cas_atomic_end_request); - - return 0; -} - -static int cas_atomic_special_req_prepare(struct cas_atomic_io *atom, - struct ocf_io *io) -{ - struct bd_object *bdobj = bd_object(io->volume); - struct block_device *bdev = bdobj->btm_bd; - - CAS_DEBUG_TRACE(); - atom->master = atom; - atom->count = 1; - atom->cmpl_fn = io->end; - atom->cmpl_context = io; - atom->volume = io->volume; - atom->flags = io->flags; - atomic_set(&atom->req_remaining, 1); - - atom->bio = bio_alloc(GFP_NOIO, 1); - if (!atom->bio) { - CAS_PRINT_RL(KERN_ERR "Couldn't 
-		return -ENOMEM;
-	}
-
-	atom->bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom);
-	atom->bio->bi_bdev = bdev;
-	atom->bio->bi_private = atom;
-
-	return 0;
-}
-
-void cas_atomic_submit_discard(struct ocf_io *io)
-{
-	struct bd_object *bdobj = bd_object(io->volume);
-	struct block_device *bdev = bdobj->btm_bd;
-	struct request_queue *q = bdev_get_queue(bdev);
-	int result = 0;
-
-	struct cas_atomic_io *atom = NULL;
-	struct blkio *blkio = cas_io_to_blkio(io);
-
-	CAS_DEBUG_TRACE();
-
-	if (!q) {
-		/* No queue, error */
-		io->end(io, -EINVAL);
-		return;
-	}
-
-	/* Allocate and setup control structure. */
-	atom = ocf_mpool_new(atomic_io_allocator, 1);
-	if (!atom) {
-		CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n");
-		io->end(io, -ENOMEM);
-		return;
-	}
-
-	result = cas_atomic_special_req_prepare(atom, io);
-	if (result) {
-		blkio->error = result;
-		goto out;
-	}
-
-	/* Increase IO reference counter for FLUSH IO */
-	ocf_io_get(io);
-
-	/* Set up specific field */
-	atom->discard = true;
-	BIO_OP_FLAGS(atom->bio) = CAS_BIO_DISCARD;
-	BIO_BISECTOR(atom->bio) = io->addr / SECTOR_SIZE;
-	BIO_BISIZE(atom->bio) = io->bytes;
-
-	atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO);
-	if (IS_ERR(atom->request)) {
-		blkio->error = PTR_ERR(atom->request);
-		goto out;
-	}
-
-	atomic_inc(&atom->req_remaining);
-	result = cas_atomic_submit_discard_bio(atom);
-	if (result)
-		blkio->error = result;
-
-out:
-	cas_atomic_end_atom(atom, blkio->error);
-}
-
-void cas_atomic_submit_flush(struct ocf_io *io)
-{
-#ifdef CAS_FLUSH_SUPPORTED
-	struct bd_object *bdobj = bd_object(io->volume);
-	struct block_device *bdev = bdobj->btm_bd;
-	struct request_queue *q = bdev_get_queue(bdev);
-	int result = 0;
-	struct cas_atomic_io *atom = NULL;
-	struct blkio *blkio = cas_io_to_blkio(io);
-
-	CAS_DEBUG_TRACE();
-
-	blkio->dirty = atomic_read(&bdobj->potentially_dirty);
-
-	if (!blkio->dirty) {
-		/* Didn't write anything to underlying disk;
-		 * no need to send req_flush
-		 */
-		io->end(io, 0);
-		return;
-	}
-
-	if (!q) {
-		io->end(io, -EINVAL);
-		return;
-	}
-
-	if (!CHECK_QUEUE_FLUSH(q)) {
-		/* This block device does not support flush */
-		atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
-		io->end(io, 0);
-		return;
-	}
-
-	/* Allocate and setup control structure. */
-	atom = ocf_mpool_new(atomic_io_allocator, 1);
-	if (!atom) {
-		CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n");
-		io->end(io, -ENOMEM);
-		return;
-	}
-
-	/* Increase IO reference counter for FLUSH IO */
-	ocf_io_get(io);
-
-	result = cas_atomic_special_req_prepare(atom, io);
-	if (result) {
-		CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n");
-		blkio->error = -ENOMEM;
-		goto out;
-	}
-
-	/* Set up specific field */
-	atom->dir = OCF_WRITE;
-	atomic_set(&atom->potential_dirty, blkio->dirty);
-
-	atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO);
-	if (IS_ERR(atom->request)) {
-		blkio->error = PTR_ERR(atom->request);
-		goto out;
-	}
-
-	atomic_inc(&atom->req_remaining);
-	cas_atomic_submit_flush_bio(atom);
-
-out:
-	cas_atomic_end_atom(atom, blkio->error);
-#else
-	/* Running operating system without support for REQ_FLUSH
-	 * (i.e. SLES 11 SP 1) CAS cannot use flushing requests to handle
-	 * power-fail safe Write-Back
-	 */
-	struct blkio *bdio = cas_io_to_blkio(io);
-
-	io->end(io, -EINVAL);
-	/* on SLES 11 SP 1 powerfail safety can only be achieved through
-	 * disabling volatile write cache of disk itself.
-	 */
-#endif
-}
-
-void cas_atomic_submit_io(struct ocf_io *io)
-{
-	CAS_DEBUG_TRACE();
-
-	if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) &&
-			CAS_IS_WRITE_FLUSH(io->flags)) {
-		/* FLUSH */
-		cas_atomic_submit_flush(io);
-		return;
-	}
-
-	if (unlikely(!io->bytes)) {
-		CAS_PRINT_RL(KERN_ERR "Zero length request\n");
-		io->end(io, -EINVAL);
-		return;
-	}
-
-	cas_atomic_fire_io(io, io->dir == OCF_READ ? cas_atomic_rd_prepare :
-			cas_atomic_wr_prepare, false);
-}
-
-void cas_atomic_submit_metadata(struct ocf_io *io)
-{
-	BUG_ON(io->dir != OCF_READ);
-
-	CAS_DEBUG_TRACE();
-
-	if (unlikely(!io->bytes)) {
-		CAS_PRINT_RL(CAS_KERN_ERR "Zero length request\n");
-		io->end(io, -EINVAL);
-		return;
-	}
-
-	cas_atomic_fire_io(io, cas_atomic_rd_metadata_prepare, false);
-}
-
-unsigned int cas_atomic_get_max_io_size(ocf_volume_t volume)
-{
-	struct block_device *bd;
-
-	if (!volume)
-		return 0;
-
-	bd = bd_object(volume)->btm_bd;
-	if (!bd->bd_disk)
-		return 0;
-
-	return queue_max_sectors(bd->bd_disk->queue);
-}
-
-void cas_atomic_close_object(ocf_volume_t volume)
-{
-	struct bd_object *bdobj = bd_object(volume);
-
-	if(bdobj->workqueue)
-		destroy_workqueue(bdobj->workqueue);
-
-	block_dev_close_object(volume);
-}
-
-int cas_atomic_open_object(ocf_volume_t volume)
-{
-	int result;
-	uint8_t type;
-	struct bd_object *bdobj = NULL;
-
-	result = block_dev_open_object(volume);
-	if (result)
-		return result;
-
-	bdobj = bd_object(volume);
-
-	result = cas_blk_identify_type_by_bdev(bdobj->btm_bd,
-			&type, &bdobj->atomic_params);
-
-<<<<<<< HEAD
-	if (type != ATOMIC_DEVICE_OBJECT) {
-		cas_atomic_close_object(volume);
-		result = -OCF_ERR_INVAL_VOLUME_TYPE;
-=======
-	if (type != ATOMIC_DEVICE_VOLUME) {
-		cas_atomic_close_object(obj);
-		result = -OCF_ERR_INVAL_DATA_OBJ_TYPE;
->>>>>>> A little cleanup
-		goto end;
-	}
-
-	bdobj->workqueue = create_workqueue("CAS_AT_ZER");
-	if (!bdobj->workqueue) {
-		cas_atomic_close_object(volume);
-		result = -ENOMEM;
-		goto end;
-	}
-
-end:
-	return result;
-}
-
-uint64_t cas_atomic_get_length(ocf_volume_t volume)
-{
-	struct bd_object *bdobj = bd_object(volume);
-
-	return bdobj->atomic_params.size;
-}
-
-/* context to keep track of write_zero progress across child IOs */
-struct cas_atomic_write_zero_ctx
-{
-	struct ocf_io *sub_io;
-	struct ocf_io *original_io;
-	struct work_struct cmpl_work;
-	unsigned step_size;
-};
-
-static void _cas_atomic_write_zeroes_end(struct cas_atomic_write_zero_ctx *ctx,
-		int error)
-{
-	struct ocf_io *io = ctx->original_io;
-
-	/* end master io */
-	io->end(io, error);
-	ocf_io_put(io);
-
-	/* cleanup context */
-	ocf_io_put(ctx->sub_io);
-	kfree(ctx);
-}
-
-/* atomic write zerores I/O completion */
-static void _cas_atomic_write_zeroes_step_cmpl(struct ocf_io *io, int error)
-{
-	struct cas_atomic_write_zero_ctx *ctx = io->priv1;
-	struct bd_object *bdobj = bd_object(io->volume);
-	const unsigned bytes_processed = (io->addr - ctx->original_io->addr)
-			+ io->bytes;
-	const unsigned bytes_left = ctx->original_io->bytes - bytes_processed;
-
-	BUG_ON(io->bytes > ctx->step_size);
-
-	/* update I/O address and size */
-	io->addr += io->bytes;
-	io->bytes = min(bytes_left, ctx->step_size);
-
-	if (!bytes_left || error) {
-		_cas_atomic_write_zeroes_end(ctx, error);
-	} else {
-		/* submit next IO from work context */
-		queue_work(bdobj->workqueue, &ctx->cmpl_work);
-	}
-}
-
-/* work routine to schedule next portion of write zero I/O */
-void _cas_atomic_write_zeroes_work(struct work_struct *work)
-{
-	struct cas_atomic_write_zero_ctx *ctx = container_of(work,
-			struct cas_atomic_write_zero_ctx, cmpl_work);
-
-	cas_atomic_fire_io(ctx->sub_io, NULL, true);
-}
-
-void cas_atomic_submit_write_zeroes(struct ocf_io *io)
-{
-	/* send 8 atoms in each I/O */
-	const unsigned step_size = min(cas_atomic_max_io_sectors()
-			* SECTOR_SIZE * 8, io->bytes);
-	struct cas_atomic_write_zero_ctx *ctx = NULL;
-	int result = 0;
-
-	if (unlikely(!io->bytes)) {
-		CAS_PRINT_RL(CAS_KERN_ERR "Zero length request\n");
-		result = -EINVAL;
-		goto error;
-	}
-
-	ctx = kmalloc(sizeof(*ctx), GFP_NOIO);
-	if (!ctx) {
-		result = -ENOMEM;
-		goto error;
-	}
-
-	ctx->sub_io = ocf_volume_new_io(io->volume);
-	if (!ctx->sub_io) {
-		result = -ENOMEM;
-		goto error_after_ctx;
-	}
-
-	/* set up context */
-	ctx->step_size = step_size;
-	ctx->original_io = io;
-	INIT_WORK(&ctx->cmpl_work, _cas_atomic_write_zeroes_work);
-
-	/* get reference to original io */
-	ocf_io_get(io);
-
-	/* set up sub-io */
-	ocf_io_configure(ctx->sub_io, io->addr,
-			min(io->bytes, ctx->step_size),
-			OCF_WRITE, 0, 0);
-	ocf_io_set_cmpl(ctx->sub_io, ctx, NULL, _cas_atomic_write_zeroes_step_cmpl);
-
-	cas_atomic_fire_io(ctx->sub_io, NULL, true);
-
-	return;
-
-error_after_ctx:
-	kfree(ctx);
-error:
-	io->end(io, result);
-}
-
-const struct ocf_volume_properties cas_object_atomic_properties = {
-	.name = "Atomic Writes NVMe",
-	.io_priv_size = sizeof(struct blkio),
-	.volume_priv_size = sizeof(struct bd_object),
-	.caps = {
-		.atomic_writes = 1,
-	},
-	.ops = {
-		.submit_io = cas_atomic_submit_io,
-		.submit_flush = cas_atomic_submit_flush,
-		.submit_discard = cas_atomic_submit_discard,
-		.submit_metadata = cas_atomic_submit_metadata,
-		.submit_write_zeroes = cas_atomic_submit_write_zeroes,
-		.open = cas_atomic_open_object,
-		.close = block_dev_close_object,
-		.get_max_io_size = cas_atomic_get_max_io_size,
-		.get_length = cas_atomic_get_length,
-	},
-	.io_ops = {
-		.set_data = cas_blk_io_set_data,
-		.get_data = cas_blk_io_get_data,
-	},
-};
-
-int atomic_dev_init(void)
-{
-	int ret;
-
-<<<<<<< HEAD
-	ret = ocf_ctx_register_volume_type(cas_ctx, ATOMIC_DEVICE_OBJECT,
-=======
-	ret = ocf_ctx_register_data_obj_type(cas_ctx, ATOMIC_DEVICE_VOLUME,
->>>>>>> A little cleanup
-			&cas_object_atomic_properties);
-
-	if (ret < 0)
-		return -EINVAL;
-
-	atomic_io_allocator = ocf_mpool_create(NULL, 0,
-			sizeof(struct cas_atomic_io), GFP_NOIO, 1, "cas_atomic_io");
-
-	if (!atomic_io_allocator) {
-<<<<<<< HEAD
-		ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_OBJECT);
-=======
-		ocf_ctx_unregister_data_obj_type(cas_ctx, ATOMIC_DEVICE_VOLUME);
->>>>>>> A little cleanup
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-void atomic_dev_deinit(void)
-{
-	if (atomic_io_allocator) {
-		ocf_mpool_destroy(atomic_io_allocator);
-		atomic_io_allocator = NULL;
-	}
-
-<<<<<<< HEAD
-	ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_OBJECT);
-=======
-	ocf_ctx_unregister_data_obj_type(cas_ctx, ATOMIC_DEVICE_VOLUME);
->>>>>>> A little cleanup
-}
-
-#else
-
-int atomic_dev_init(void)
-{
-	return 0;
-}
-
-void atomic_dev_deinit(void)
-{
-}
-
-#endif
diff --git a/modules/cas_cache/volume/vol_block_dev_top.o.ur-safe b/modules/cas_cache/volume/vol_block_dev_top.o.ur-safe
deleted file mode 100644
index 24724ca83..000000000
--- a/modules/cas_cache/volume/vol_block_dev_top.o.ur-safe
+++ /dev/null
@@ -1,2 +0,0 @@
-/home/robert/work/cas/ICAS_Linux/modules/cas_cache/volume/vol_block_dev_top.o-.text-9bd
-/home/robert/work/cas/ICAS_Linux/modules/cas_cache/volume/vol_block_dev_top.o-.text-9c4
diff --git a/modules/cas_disk/exp_obj.o.ur-safe b/modules/cas_disk/exp_obj.o.ur-safe
deleted file mode 100644
index 288700eef..000000000
--- a/modules/cas_disk/exp_obj.o.ur-safe
+++ /dev/null
@@ -1,2 +0,0 @@
-/home/robert/work/cas/ICAS_Linux/modules/cas_disk/exp_obj.o-.text-f20
-/home/robert/work/cas/ICAS_Linux/modules/cas_disk/exp_obj.o-.text-f27