diff --git a/src/deploy/ceph/cinder-volume.yml b/src/deploy/ceph/cinder-volume.yml
new file mode 100644
index 0000000000000000000000000000000000000000..73363e18c1487f83b14926b7250aeb762fbf8a1d
--- /dev/null
+++ b/src/deploy/ceph/cinder-volume.yml
@@ -0,0 +1,22 @@
+---
+# This file is an example of how to run the cinder-volume service
+# in a container.
+#
+# Important note:
+# When using LVM or any other iSCSI-based cinder backend, such as NetApp
+# over iSCSI, the cinder-volume service *must* run on metal (see the
+# commented variant below).
+# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
+
+container_skel:
+  cinder_volumes_container:
+    properties:
+      is_metal: false
+
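+# For an LVM or other iSCSI-based backend, the same skeleton would instead
+# pin the service to metal (a commented sketch mirroring the note above):
+#
+# container_skel:
+#   cinder_volumes_container:
+#     properties:
+#       is_metal: true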
diff --git a/src/deploy/ceph/opestack_user_config_prod.yml b/src/deploy/ceph/opestack_user_config_prod.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30c94b0ee2b89386fb0cb165493c9d7cf1cc3188
--- /dev/null
+++ b/src/deploy/ceph/opestack_user_config_prod.yml
@@ -0,0 +1,149 @@
+---
+cidr_networks:
+    container: 10.0.1.0/24
+    tunnel: 10.0.10.0/24
+    storage: 10.0.2.0/24
+
+used_ips:
+    - "10.0.1.1,10.0.1.20" # red de management
+    - "10.0.2.1,10.0.2.20" # red de storage
+    - "10.0.10.1,10.0.10.20" # red de vxlan
+
+global_overrides:
+    internal_lb_vip_address: 10.0.1.15
+    #
+    # The below domain name must resolve to an IP address
+    # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
+    # If using different protocols (https/http) for the public/internal
+    # endpoints the two addresses must be different.
+    #
+    external_lb_vip_address: 192.168.60.160
+    tunnel_bridge: "br-vxlan"
+    management_bridge: "br-mgmt"
+    storage_bridge: "br-storage"
+
+    provider_networks:
+        - network:
+            group_binds:
+                - all_containers
+                - hosts
+            type: "raw"
+            container_bridge: "br-mgmt"
+            container_interface: "eth1"
+            container_type: "veth"
+            ip_from_q: "container"
+            is_container_address: true
+            is_ssh_address: true
+        - network:
+            group_binds:
+                - glance_api
+                - cinder_api
+                - cinder_volume
+                - nova_compute
+                - ceph-osd # NEW: added for the Ceph deployment
+            type: "raw"
+            container_bridge: "br-storage"
+            container_type: "veth"
+            container_interface: "eth2"
+            container_mtu: "9000"
+            ip_from_q: "storage"
+        - network:
+            group_binds:
+                - neutron_linuxbridge_agent
+            container_bridge: "br-vxlan"
+            container_type: "veth"
+            container_interface: "eth10"
+            container_mtu: "9000"
+            ip_from_q: "tunnel"
+            type: "vxlan"
+            range: "1:1000"
+            net_name: "vxlan"
+#        - network:
+#            group_binds:
+#                - neutron_linuxbridge_agent
+#            container_bridge: "br-vlan"
+#            container_type: "veth"
+#            container_interface: "eth11"
+#            host_bind_override: "eth3"
+#            type: "vlan"
+#            range: "101:200,301:400"
+#            net_name: "vlan"
+        - network:
+            group_binds:
+                - neutron_linuxbridge_agent
+            container_bridge: "br-vlan"
+            container_type: "veth"
+            container_interface: "eth12"
+            host_bind_override: "eth3"
+            type: "flat"
+            net_name: "flat"
+
+###
+### Infrastructure
+###
+
+_infrastructure_hosts: &infrastructure_hosts
+    infra1:
+        ip: 10.0.1.11
+
+# nova hypervisors
+compute_hosts:
+    compute1:
+        ip: 10.0.1.12
+
+# ceph Object Storage Daemons
+ceph-osd_hosts:
+    osd1:
+        ip: 10.0.1.13
+    osd2:
+        ip: 10.0.1.14
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts: *infrastructure_hosts
+
+# ceph monitor containers
+ceph-mon_hosts: *infrastructure_hosts
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts: *infrastructure_hosts
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+    balancer1:
+        ip: 10.0.1.15
+
+# rsyslog server
+log_hosts: *infrastructure_hosts
+
+###
+### OpenStack
+###
+
+# keystone
+identity_hosts: *infrastructure_hosts
+
+# cinder api services
+storage-infra_hosts: *infrastructure_hosts
+
+# glance
+image_hosts: *infrastructure_hosts
+
+# nova api, conductor, etc services
+compute-infra_hosts: *infrastructure_hosts
+
+# heat
+orchestration_hosts: *infrastructure_hosts
+
+# horizon
+dashboard_hosts: *infrastructure_hosts
+
+# neutron server, agents (L3, etc)
+network_hosts: *infrastructure_hosts
+
+# cinder volume hosts (Ceph RBD-backed)
+storage_hosts: *infrastructure_hosts
+
diff --git a/src/deploy/ceph/user_variables.yml b/src/deploy/ceph/user_variables.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6ece305d2c39b862a9d5627b7fb2e2eb4233e7cd
--- /dev/null
+++ b/src/deploy/ceph/user_variables.yml
@@ -0,0 +1,225 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###
+### This file contains commonly used overrides for convenience. Please inspect
+### the defaults for each role to find additional override options.
+###
+
+## Debug and Verbose options.
+debug: true
+
+## Common Glance Overrides
+# Set glance_default_store to "swift" if using a Cloud Files back-end,
+# or "rbd" if using a ceph back-end; the latter will trigger ceph to be
+# installed on the glance hosts. If using a file store, a shared file store
+# is recommended. See the OpenStack-Ansible install guide and the OpenStack
+# documentation for more details.
+# Note that "swift" is automatically set as the default back-end if there
+# are any swift hosts in the environment. Use this setting to override
+# this automation if you wish for a different default back-end.
+# glance_default_store: file
+
+## Ceph pool name for Glance to use
+# glance_rbd_store_pool: images
+# glance_rbd_store_chunk_size: 8
+
+## Common Nova Overrides
+# When nova_libvirt_images_rbd_pool is defined, ceph will be installed on nova
+# hosts.
+# nova_libvirt_images_rbd_pool: vms
+
+# If you wish to change the dhcp_domain configured for both nova and neutron
+# dhcp_domain: openstacklocal
+
+## Common Glance Overrides when using a Swift back-end
+# By default when 'glance_default_store' is set to 'swift' the playbooks will
+# expect to use the Swift back-end that is configured in the same inventory.
+# If the Swift back-end is not in the same inventory (i.e. it is already set
+# up through some other means) then these settings should be used.
+#
+# NOTE: Ensure that the auth version matches your authentication endpoint.
+#
+# NOTE: If the password for glance_swift_store_key contains a dollar sign ($),
+# it must be escaped with an additional dollar sign ($$), not a backslash. For
+# example, a password of "super$ecure" would need to be entered as
+# "super$$ecure" below.  See Launchpad Bug #1259729 for more details.
+#
+# glance_swift_store_auth_version: 3
+# glance_swift_store_auth_address: "https://some.auth.url.com"
+# glance_swift_store_user: "OPENSTACK_TENANT_ID:OPENSTACK_USER_NAME"
+# glance_swift_store_key: "OPENSTACK_USER_PASSWORD"
+# glance_swift_store_container: "NAME_OF_SWIFT_CONTAINER"
+# glance_swift_store_region: "NAME_OF_REGION"
+
+## Common Ceph Overrides
+# ceph_mons:
+#   - 10.16.5.40
+#   - 10.16.5.41
+#   - 10.16.5.42
+
+## Custom Ceph Configuration File (ceph.conf)
+# By default, your deployment host will connect to one of the mons defined above to
+# obtain a copy of your cluster's ceph.conf.  If you prefer, uncomment ceph_conf_file
+# and customise to avoid ceph.conf being copied from a mon.
+# ceph_conf_file: |
+#   [global]
+#   fsid = 00000000-1111-2222-3333-444444444444
+#   mon_initial_members = mon1.example.local,mon2.example.local,mon3.example.local
+#   mon_host = 10.16.5.40,10.16.5.41,10.16.5.42
+#   # optionally, you can use this construct to avoid defining this list twice:
+#   # mon_host = {{ ceph_mons|join(',') }}
+#   auth_cluster_required = cephx
+#   auth_service_required = cephx
+
+
+# By default, openstack-ansible configures all OpenStack services to talk to
+# RabbitMQ over encrypted connections on port 5671. To opt-out of this default,
+# set the rabbitmq_use_ssl variable to 'false'. The default setting of 'true'
+# is highly recommended for securing the contents of RabbitMQ messages.
+# rabbitmq_use_ssl: false
+
+# The RabbitMQ management plugin is enabled by default; the guest user has
+# been removed for security reasons and a new userid 'monitoring' has been
+# created with the 'monitoring' user tag. In order to modify the userid,
+# uncomment the following and change 'monitoring' to your userid of choice.
+# rabbitmq_monitoring_userid: monitoring
+
+
+## Additional pinning generator that will allow for more packages to be pinned as you see fit.
+## All pins allow for package and versions to be defined. Be careful using this as versions
+## are always subject to change and updates regarding security will become your problem from this
+## point on. Pinning can be done based on a package version, release, or origin. Use "*" in the
+## package name to indicate that you want to pin all packages to a particular constraint.
+# apt_pinned_packages:
+#   - { package: "lxc", version: "1.0.7-0ubuntu0.1" }
+#   - { package: "libvirt-bin", version: "1.2.2-0ubuntu13.1.9" }
+#   - { package: "rabbitmq-server", origin: "www.rabbitmq.com" }
+#   - { package: "*", release: "MariaDB" }
+
+
+## Environment variable settings
+# This allows users to specify additional environment variables to be set,
+# which is useful when working behind a proxy. If working behind a proxy,
+# it's important to always specify the scheme as "http://", as this is what
+# the underlying python libraries will handle best. This proxy information
+# will be placed both on the hosts and inside the containers.
+
+## Example environment variable setup:
+## This is used by apt-cacher-ng to download apt packages:
+proxy_env_url: http://10.0.1.1:3128/
+
+## (1) This sets up a permanent environment, used during and after deployment:
+no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
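+# (The Jinja loop above appends the management address of every container in
+# the inventory, yielding one comma-separated list such as
+# "localhost,127.0.0.1,10.0.1.15,192.168.60.160,10.0.1.21,..."; the container
+# addresses shown here are illustrative.)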
+global_environment_variables:
+    HTTP_PROXY: "{{ proxy_env_url }}"
+    HTTPS_PROXY: "{{ proxy_env_url }}"
+    NO_PROXY: "{{ no_proxy_env }}"
+    http_proxy: "{{ proxy_env_url }}"
+    https_proxy: "{{ proxy_env_url }}"
+    no_proxy: "{{ no_proxy_env }}"
+#
+## (2) This is applied only during deployment; nothing is left after deployment is complete:
+deployment_environment_variables:
+    http_proxy: "{{ proxy_env_url }}"
+    https_proxy: "{{ proxy_env_url }}"
+    no_proxy: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['keystone_all'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
+
+
+## SSH connection wait time
+# If an increased delay for the ssh connection check is desired,
+# uncomment this variable and set it appropriately.
+#ssh_delay: 5
+
+
+## HAProxy and keepalived
+# All the previous variables are consumed inside a single variable in the
+# group vars. You can override the current keepalived definition (see
+# group_vars/all/keepalived.yml) in your user space if necessary.
+#
+# Uncomment this to disable keepalived installation (cf. documentation)
+# haproxy_use_keepalived: False
+#
+# HAProxy Keepalived configuration (cf. documentation)
+# Make sure that this is set correctly according to the CIDR used for your
+# internal and external addresses.
+# haproxy_keepalived_external_vip_cidr: "{{external_lb_vip_address}}/32"
+# haproxy_keepalived_internal_vip_cidr: "{{internal_lb_vip_address}}/32"
+# haproxy_keepalived_external_interface:
+# haproxy_keepalived_internal_interface:
+
+# Defines the default VRRP id used for keepalived with haproxy.
+# Override it with your own value to make sure you don't overlap with
+# existing VRRP ids on your network. Defaults are 10 for the external
+# and 11 for the internal VRRP.
+# haproxy_keepalived_external_virtual_router_id:
+# haproxy_keepalived_internal_virtual_router_id:
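+# For example, mirroring the documented defaults:
+# haproxy_keepalived_external_virtual_router_id: 10
+# haproxy_keepalived_internal_virtual_router_id: 11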
+
+# Defines the VRRP master/backup priority. Defaults to 100 and 20 respectively.
+# haproxy_keepalived_priority_master:
+# haproxy_keepalived_priority_backup:
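+# For example, mirroring the documented defaults:
+# haproxy_keepalived_priority_master: 100
+# haproxy_keepalived_priority_backup: 20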
+
+# Keepalived default IP address used to check its alive status (IPv4 only)
+# keepalived_ping_address: "193.0.14.129"
+
+# Variable to enable legacy-mode image creation in Horizon
+horizon_images_upload_mode: "legacy"
+
+## Ceph cluster fsid (must be generated before first run)
+## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
+generate_fsid: false
+fsid: b89476da-b2ea-4aae-b8cf-0b91d7aa2208 # Replace with your generated UUID
+
+## ceph-ansible settings
+## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
+## additional configuration options available.
+monitor_address_block: "{{ cidr_networks.container }}"
+public_network: "{{ cidr_networks.container }}"
+cluster_network: "{{ cidr_networks.storage }}"
+osd_scenario: collocated
+journal_size: 10240 # size in MB
+# ceph-ansible automatically creates pools & keys for OpenStack services
+openstack_config: true
+cinder_ceph_client: cinder
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+
+cinder_backends:
+  RBD:
+    volume_driver: cinder.volume.drivers.rbd.RBDDriver
+    rbd_pool: volumes
+    rbd_ceph_conf: /etc/ceph/ceph.conf
+    rbd_store_chunk_size: 8
+    volume_backend_name: rbddriver
+    rbd_user: "{{ cinder_ceph_client }}"
+    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+    report_discard_supported: true
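+
+# Post-deployment sanity check (a sketch; it assumes the cinder and glance
+# client keyrings have been distributed to the host running the commands):
+#   rbd ls volumes --id cinder   # list RBD volumes created by cinder
+#   rbd ls images --id glance    # list RBD images uploaded through glance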
diff --git a/src/deploy/cinder-volume.yml b/src/deploy/lvm/cinder-volume.yml
similarity index 100%
rename from src/deploy/cinder-volume.yml
rename to src/deploy/lvm/cinder-volume.yml
diff --git a/src/deploy/openstack_user_config_test.yml b/src/deploy/lvm/openstack_user_config_test.yml
similarity index 100%
rename from src/deploy/openstack_user_config_test.yml
rename to src/deploy/lvm/openstack_user_config_test.yml
diff --git a/src/deploy/opestack_user_config_prod.yml b/src/deploy/lvm/opestack_user_config_prod.yml
similarity index 100%
rename from src/deploy/opestack_user_config_prod.yml
rename to src/deploy/lvm/opestack_user_config_prod.yml
diff --git a/src/deploy/user_variables.yml b/src/deploy/lvm/user_variables.yml
similarity index 100%
rename from src/deploy/user_variables.yml
rename to src/deploy/lvm/user_variables.yml
diff --git a/src/deploy/setupDeploy.sh b/src/deploy/setupDeploy.sh
deleted file mode 100644
index ba124dd8167c136a518f92f58ba17a2f628b490b..0000000000000000000000000000000000000000
--- a/src/deploy/setupDeploy.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-echo "CONFIGURANDO PROXY ..."
-sleep 1
-echo "http_proxy=http://10.0.1.1:3128" >> /etc/environment
-echo "https_proxy=http://10.0.1.1:3128" >> /etc/environment
-echo "HTTP_PROXY=http://10.0.1.1:3128" >> /etc/environment
-echo "HTTPS_PROXY=http://10.0.1.1:3128" >> /etc/environment
-for env in $( cat /etc/environment ); do export $(echo $env | sed -e 's/"//g'); done
-
-echo "ACTUALIZANDO CentOS 7..."
-yum upgrade -y
-
-echo "INSTALANDO RDO OSA QUEENS..."
-yum install -y https://rdoproject.org/repos/openstack-queens/rdo-release-queens.rpm
-
-echo "INSTALANDO HERRAMIENTAS..."
-yum install -y git ntp nano net-tools ntpdate openssh-server python-devel sudo '@Development Tools'
-
-echo "DESHABILITANDO FIREWALL"
-sleep 1
-systemctl stop firewalld
-systemctl mask firewalld
-
-echo "CONFIGURAR CHRONY MANUALMENTE!!!"
-sleep 1
-# nano /etc/chrony.conf
-# Sustituir por las líneas
-# server 0.south-america.pool.ntp.org
-# server 1.south-america.pool.ntp.org
-# server 2.south-america.pool.ntp.org
-# server 3.south-america.pool.ntp.org
-
-echo "CLANANDO REPOSITORIO GIT..."
-git clone -b 17.1.4 https://git.openstack.org/openstack/openstack-ansible /opt/openstack-ansible
-
-echo "EJECUTANDO BOOTSTRAP-ANSIBLE..."
-/opt/openstack-ansible/scripts/bootstrap-ansible.sh
-
-echo "GENERANDO CLAVES SSH..."
-ssh-keygen
-
-echo "COPIAR MANUALMENTE CLAVES SSH..."
-#ssh-copy-id root@10.0.1.11