From b8c982b53001b3becab608ad5261a9c2f4b6a7ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Wed, 29 Oct 2025 08:33:37 -0600 Subject: [PATCH 01/31] First iteration --- plugins/filter/generate_data_set_name.py | 85 ++++++++++++++++++++ tests/filters/test_generate_data_set_name.py | 10 +++ 2 files changed, 95 insertions(+) create mode 100644 plugins/filter/generate_data_set_name.py create mode 100644 tests/filters/test_generate_data_set_name.py diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py new file mode 100644 index 0000000000..2bd7bfa72a --- /dev/null +++ b/plugins/filter/generate_data_set_name.py @@ -0,0 +1,85 @@ +# Copyright (c) IBM Corporation 2025 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import string +import re +from ansible.errors import AnsibleFilterError + +def generate_data_set_name(value, llq="", generations=1): + """Filter to generate valid data set names + + Args: + value {str} -- value of high level qualifier to use on data set names + llq {str, optional} -- last level qualifier use on names. Defaults to "". + generations {int, optional} -- number of dataset names to generate. Defaults to 1. + + Returns: + list -- the total dataset names valid + """ + if len(value) > 8: + raise AnsibleFilterError("The high level qualifier is too long for the data set name") + + if len(llq) > 8: + raise AnsibleFilterError("The last level qualifier is too long for the data set name") + + if generations > 1: + dataset_names = [] + for generation in range(generations): + name = value + get_tmp_ds_name(llq=llq) + dataset_names.append(name) + else: + dataset_names = "" + value += get_tmp_ds_name(llq=llq) + + return dataset_names + +def get_tmp_ds_name(llq=""): + """Unify the random qualifiers generate in one name. + + Args: + llq {str, optional}: last level qualifier use on names. Defaults to "". + + Returns: + str: valid data set name + """ + ds = "." + ds += "P" + get_random_q() + "." + ds += "T" + get_random_q() + "." + if llq: + ds+=llq + return ds + else: + ds+= "C" + get_random_q() + return ds + +def get_random_q(): + """ Function or test to ensure random hlq of datasets""" + # Generate the first random hlq of size pass as parameter + letters = string.ascii_uppercase + string.digits + random_q = ''.join(random.choice(letters)for iteration in range(7)) + count = 0 + # Generate a random HLQ and verify if is valid, if not, repeat the process + while count < 5 and not re.fullmatch( + r"^(?:[A-Z$#@]{1}[A-Z0-9$#@-]{0,7})", + random_q, + re.IGNORECASE, + ): + random_q = ''.join(random.choice(letters)for iteration in range(7)) + count += 1 + return random_q + +class FilterModule(object): + """ Jinja2 filter for the returned list or string by the collection module. 
""" + def filters(self): + return { + 'generate_data_set_name': generate_data_set_name + } diff --git a/tests/filters/test_generate_data_set_name.py b/tests/filters/test_generate_data_set_name.py new file mode 100644 index 0000000000..68e09dd745 --- /dev/null +++ b/tests/filters/test_generate_data_set_name.py @@ -0,0 +1,10 @@ +import jinja2 +from filter_plugins.my_filters import repeat_as_list + +def test_repeat_as_list_jinja2(): + env = jinja2.Environment() + env.filters['repeat_as_list'] = repeat_as_list + + template = env.from_string("{{ 'core' | repeat_as_list(3) }}") + result = template.render() + assert result == "['core', 'core', 'core']" \ No newline at end of file From 411f8d0e8ebab4f49dbb865995bbedcef6bee4e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Wed, 29 Oct 2025 14:20:16 -0600 Subject: [PATCH 02/31] Fix bad return --- plugins/filter/generate_data_set_name.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index 2bd7bfa72a..8beb706035 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -37,8 +37,7 @@ def generate_data_set_name(value, llq="", generations=1): name = value + get_tmp_ds_name(llq=llq) dataset_names.append(name) else: - dataset_names = "" - value += get_tmp_ds_name(llq=llq) + dataset_names = value + get_tmp_ds_name(llq=llq) return dataset_names From 400c79add51e7cf5fe63f16738b97f3cca576bd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Wed, 29 Oct 2025 14:20:37 -0600 Subject: [PATCH 03/31] Generate pytest test --- tests/filters/test_generate_data_set_name.py | 36 +++++++++++++++----- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/tests/filters/test_generate_data_set_name.py b/tests/filters/test_generate_data_set_name.py index 68e09dd745..3d099ab57c 100644 --- a/tests/filters/test_generate_data_set_name.py +++ b/tests/filters/test_generate_data_set_name.py @@ -1,10 +1,30 @@ -import jinja2 -from filter_plugins.my_filters import repeat_as_list +# -*- coding: utf-8 -*- -def test_repeat_as_list_jinja2(): - env = jinja2.Environment() - env.filters['repeat_as_list'] = repeat_as_list +# Copyright (c) IBM Corporation 2025 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import pytest + +def test_generate_data_set_name_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + hosts.all.set_fact(input_string=input_string) + results = hosts.all.debug(msg="{{ input_string | generate_data_set_name }}") + + for result in results.contacted.values(): + print(result) + assert result.get('msg') is not None + assert input_string in result.get('msg') - template = env.from_string("{{ 'core' | repeat_as_list(3) }}") - result = template.render() - assert result == "['core', 'core', 'core']" \ No newline at end of file From cc2bc291404e04b14de41e13fba9398eaeed0bd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Wed, 29 Oct 2025 14:42:15 -0600 Subject: [PATCH 04/31] Add documentation and full test suite --- plugins/filter/generate_data_set_name.py | 23 +++--------- plugins/filter/generate_data_set_name.yml | 39 ++++++++++++++++++++ tests/filters/test_generate_data_set_name.py | 21 ++++++++++- 3 files changed, 65 insertions(+), 18 deletions(-) create mode 100644 plugins/filter/generate_data_set_name.yml diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index 8beb706035..a926f7d642 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -14,12 +14,11 @@ import re from ansible.errors import AnsibleFilterError -def generate_data_set_name(value, llq="", generations=1): +def generate_data_set_name(value, generations=1): """Filter to generate valid data set names Args: value {str} -- value of high level qualifier to use on data set names - llq {str, optional} -- last level qualifier use on names. Defaults to "". generations {int, optional} -- number of dataset names to generate. Defaults to 1. Returns: @@ -28,37 +27,27 @@ def generate_data_set_name(value, llq="", generations=1): if len(value) > 8: raise AnsibleFilterError("The high level qualifier is too long for the data set name") - if len(llq) > 8: - raise AnsibleFilterError("The last level qualifier is too long for the data set name") - if generations > 1: dataset_names = [] for generation in range(generations): - name = value + get_tmp_ds_name(llq=llq) + name = value + get_tmp_ds_name() dataset_names.append(name) else: - dataset_names = value + get_tmp_ds_name(llq=llq) + dataset_names = value + get_tmp_ds_name() return dataset_names -def get_tmp_ds_name(llq=""): +def get_tmp_ds_name(): """Unify the random qualifiers generate in one name. - Args: - llq {str, optional}: last level qualifier use on names. Defaults to "". - Returns: str: valid data set name """ ds = "." ds += "P" + get_random_q() + "." ds += "T" + get_random_q() + "." - if llq: - ds+=llq - return ds - else: - ds+= "C" + get_random_q() - return ds + ds+= "C" + get_random_q() + return ds def get_random_q(): """ Function or test to ensure random hlq of datasets""" diff --git a/plugins/filter/generate_data_set_name.yml b/plugins/filter/generate_data_set_name.yml new file mode 100644 index 0000000000..b22d4a6dca --- /dev/null +++ b/plugins/filter/generate_data_set_name.yml @@ -0,0 +1,39 @@ +# Copyright (c) IBM Corporation 2025 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +DOCUMENTATION: + name: generate_data_set_name + author: Marcel Gutierrez (@AndreMarcel99) + version_added: "2.0.0" + short_description: Filter returned valid data set names + description: + - Provide a valid temporary data set name. + options: + value: + description: + - High level qualifier. + type: str + required: true + generations: + description: + - Number of dta set names that you require to generate. + type: int + required: false + +EXAMPLES: | + # Get only one data set name. + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name}}" + # Get 10 data set names. + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(10)}}" + +RETURN: + _value: + description: Name or names generated by the filter. + type: list \ No newline at end of file diff --git a/tests/filters/test_generate_data_set_name.py b/tests/filters/test_generate_data_set_name.py index 3d099ab57c..920c7b840a 100644 --- a/tests/filters/test_generate_data_set_name.py +++ b/tests/filters/test_generate_data_set_name.py @@ -24,7 +24,26 @@ def test_generate_data_set_name_filter(ansible_zos_module): results = hosts.all.debug(msg="{{ input_string | generate_data_set_name }}") for result in results.contacted.values(): - print(result) assert result.get('msg') is not None assert input_string in result.get('msg') +def test_generate_data_set_name_filter_multiple_generations(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + hosts.all.set_fact(input_string=input_string) + results = hosts.all.debug(msg="{{ input_string | generate_data_set_name(10) }}") + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert input_string in result.get('msg')[0] + assert len(result.get('msg')) == 10 + +def test_generate_data_set_name_filter_bad_hl1(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADMONE" + hosts.all.set_fact(input_string=input_string) + results = hosts.all.debug(msg="{{ input_string | generate_data_set_name }}") + + for result in results.contacted.values(): + assert result.get('failed') is True + assert result.get('msg') == "The high level qualifier is too long for the data set name" From 491c971850172bcb49b843e08d8f7cb895a4ecd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Fri, 31 Oct 2025 12:28:19 -0600 Subject: [PATCH 05/31] Fix comments --- plugins/filter/generate_data_set_name.py | 26 ++++++++++++++---------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index a926f7d642..f1560e20ae 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -14,6 +14,7 @@ import re from ansible.errors import AnsibleFilterError + def generate_data_set_name(value, generations=1): """Filter to generate valid data set names @@ -30,13 +31,14 @@ def generate_data_set_name(value, generations=1): if generations > 1: dataset_names = [] for generation in range(generations): - name = value + get_tmp_ds_name() + name = value + get_tmp_ds_name() 
dataset_names.append(name) else: dataset_names = value + get_tmp_ds_name() return dataset_names + def get_tmp_ds_name(): """Unify the random qualifiers generate in one name. @@ -46,25 +48,27 @@ def get_tmp_ds_name(): ds = "." ds += "P" + get_random_q() + "." ds += "T" + get_random_q() + "." - ds+= "C" + get_random_q() + ds += "C" + get_random_q() return ds + def get_random_q(): """ Function or test to ensure random hlq of datasets""" # Generate the first random hlq of size pass as parameter - letters = string.ascii_uppercase + string.digits - random_q = ''.join(random.choice(letters)for iteration in range(7)) + letters = string.ascii_uppercase + string.digits + random_q = ''.join(random.choice(letters)for iteration in range(7)) count = 0 # Generate a random HLQ and verify if is valid, if not, repeat the process - while count < 5 and not re.fullmatch( - r"^(?:[A-Z$#@]{1}[A-Z0-9$#@-]{0,7})", - random_q, - re.IGNORECASE, - ): - random_q = ''.join(random.choice(letters)for iteration in range(7)) - count += 1 + while count < 5 and not re.fullmatch( + r"^(?:[A-Z$#@]{1}[A-Z0-9$#@-]{0,7})", + random_q, + re.IGNORECASE, + ): + random_q = ''.join(random.choice(letters)for iteration in range(7)) + count += 1 return random_q + class FilterModule(object): """ Jinja2 filter for the returned list or string by the collection module. """ def filters(self): From 2580b14dfbfec2ad76a37a7d41195dfa93a639cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Fri, 31 Oct 2025 12:32:31 -0600 Subject: [PATCH 06/31] Fix dataset --- plugins/filter/generate_data_set_name.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index f1560e20ae..5ebaa60765 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -60,12 +60,12 @@ def get_random_q(): count = 0 # Generate a random HLQ and verify if is valid, if not, repeat the process while count < 5 and not re.fullmatch( - r"^(?:[A-Z$#@]{1}[A-Z0-9$#@-]{0,7})", - random_q, - re.IGNORECASE, - ): - random_q = ''.join(random.choice(letters)for iteration in range(7)) - count += 1 + r"^(?:[A-Z$#@]{1}[A-Z0-9$#@-]{0,7})", + random_q, + re.IGNORECASE, + ): + random_q = ''.join(random.choice(letters)for iteration in range(7)) + count += 1 return random_q From 98c94109469772ca3f445c3e069a153dafb0523e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Fri, 31 Oct 2025 12:42:22 -0600 Subject: [PATCH 07/31] Fix bandit --- plugins/filter/generate_data_set_name.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index 5ebaa60765..afb9093ca0 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import random +import secrets import string import re from ansible.errors import AnsibleFilterError @@ -56,7 +56,7 @@ def get_random_q(): """ Function or test to ensure random hlq of datasets""" # Generate the first random hlq of size pass as parameter letters = string.ascii_uppercase + string.digits - random_q = ''.join(random.choice(letters)for iteration in range(7)) + random_q = ''.join(secrets.choice(letters)for iteration in range(7)) count = 0 # Generate a random HLQ and verify if is valid, if not, repeat the process while count < 5 and not re.fullmatch( @@ -64,7 +64,7 @@ def get_random_q(): random_q, re.IGNORECASE, ): - random_q = ''.join(random.choice(letters)for iteration in range(7)) + random_q = ''.join(secrets.choice(letters)for iteration in range(7)) count += 1 return random_q From 3d1b82bcef06c5ac8e948c2027b6732c5cb6d31a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Fri, 31 Oct 2025 12:43:36 -0600 Subject: [PATCH 08/31] Fix typo --- plugins/filter/generate_data_set_name.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/filter/generate_data_set_name.yml b/plugins/filter/generate_data_set_name.yml index b22d4a6dca..f3cd1bad18 100644 --- a/plugins/filter/generate_data_set_name.yml +++ b/plugins/filter/generate_data_set_name.yml @@ -23,7 +23,7 @@ DOCUMENTATION: required: true generations: description: - - Number of dta set names that you require to generate. + - Number of data set names that you require to generate. type: int required: false From c07d41f199459b2b43da39d9e6f1aa8dbcb24268 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Mon, 3 Nov 2025 12:17:48 -0600 Subject: [PATCH 09/31] Move files for dependency finder --- tests/{ => functional}/filters/test_generate_data_set_name.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{ => functional}/filters/test_generate_data_set_name.py (100%) diff --git a/tests/filters/test_generate_data_set_name.py b/tests/functional/filters/test_generate_data_set_name.py similarity index 100% rename from tests/filters/test_generate_data_set_name.py rename to tests/functional/filters/test_generate_data_set_name.py From c63ded26f24da8d6d341f2081ac6dfdec7bab479 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Mon, 3 Nov 2025 12:21:42 -0600 Subject: [PATCH 10/31] Rename filter to filters --- plugins/{filter => filters}/__init__.py | 0 plugins/{filter => filters}/generate_data_set_name.py | 0 plugins/{filter => filters}/generate_data_set_name.yml | 0 plugins/{filter => filters}/wtor.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename plugins/{filter => filters}/__init__.py (100%) rename plugins/{filter => filters}/generate_data_set_name.py (100%) rename plugins/{filter => filters}/generate_data_set_name.yml (100%) rename plugins/{filter => filters}/wtor.py (100%) diff --git a/plugins/filter/__init__.py b/plugins/filters/__init__.py similarity index 100% rename from plugins/filter/__init__.py rename to plugins/filters/__init__.py diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filters/generate_data_set_name.py similarity index 100% rename from plugins/filter/generate_data_set_name.py rename to plugins/filters/generate_data_set_name.py diff --git a/plugins/filter/generate_data_set_name.yml b/plugins/filters/generate_data_set_name.yml similarity index 100% rename from plugins/filter/generate_data_set_name.yml rename to 
plugins/filters/generate_data_set_name.yml diff --git a/plugins/filter/wtor.py b/plugins/filters/wtor.py similarity index 100% rename from plugins/filter/wtor.py rename to plugins/filters/wtor.py From 8ae1f9809b0b1baa45c035b97c78bb01040bf97a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Mon, 3 Nov 2025 12:35:58 -0600 Subject: [PATCH 11/31] Return name --- plugins/{filters => filter}/__init__.py | 0 plugins/{filters => filter}/generate_data_set_name.py | 0 plugins/{filters => filter}/generate_data_set_name.yml | 0 plugins/{filters => filter}/wtor.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename plugins/{filters => filter}/__init__.py (100%) rename plugins/{filters => filter}/generate_data_set_name.py (100%) rename plugins/{filters => filter}/generate_data_set_name.yml (100%) rename plugins/{filters => filter}/wtor.py (100%) diff --git a/plugins/filters/__init__.py b/plugins/filter/__init__.py similarity index 100% rename from plugins/filters/__init__.py rename to plugins/filter/__init__.py diff --git a/plugins/filters/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py similarity index 100% rename from plugins/filters/generate_data_set_name.py rename to plugins/filter/generate_data_set_name.py diff --git a/plugins/filters/generate_data_set_name.yml b/plugins/filter/generate_data_set_name.yml similarity index 100% rename from plugins/filters/generate_data_set_name.yml rename to plugins/filter/generate_data_set_name.yml diff --git a/plugins/filters/wtor.py b/plugins/filter/wtor.py similarity index 100% rename from plugins/filters/wtor.py rename to plugins/filter/wtor.py From 97069f2344787618759d355f41f366b57184ab01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Mon, 3 Nov 2025 12:38:55 -0600 Subject: [PATCH 12/31] Add to dependency finder for filters --- tests/dependencyfinder.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/dependencyfinder.py b/tests/dependencyfinder.py index ce2f4ebac8..d82b7a9af9 100755 --- a/tests/dependencyfinder.py +++ b/tests/dependencyfinder.py @@ -574,6 +574,12 @@ def get_changed_plugins(path, branch="origin/dev"): path_corrected_line = line.split("|", 1)[0].strip() if "plugins/modules/" in line: path_corrected_line = line.split("|", 1)[0].strip() + if "plugins/filter/" in line: + path_corrected_line = line.split("|", 1)[0].strip() + if "functional/filters/" in line: + if re.match('..', line): + line = line.replace("..", "tests") + path_corrected_line = line.split("|", 1)[0].strip() if "functional/modules/" in line: if re.match('...', line): line = line.replace("...", "tests") From eff0c5ebd3540e8b5911d45ac56e12f395e0ae3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Mon, 3 Nov 2025 12:51:07 -0600 Subject: [PATCH 13/31] Add to ansible config to ensure work properly --- ansible.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible.cfg b/ansible.cfg index 19fa364ccb..4ba6ba3965 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -29,6 +29,7 @@ [defaults] forks = 25 action_plugins=~/.ansible/collections/ansible_collections/ibm/ibm_zos_core/plugins/action +filter_plugins=~/.ansible/collections/ansible_collections/ibm/ibm_zos_core/plugins/filter # remote_tmp = /u/ansible/tmp # remote_port = 22 # debug = True From 0577d52635ea197ea963a11374ceecc4d904a307 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Fri, 7 
Nov 2025 11:53:59 -0600 Subject: [PATCH 14/31] Update to use middle and last level qualifier --- plugins/filter/generate_data_set_name.py | 68 ++++++-- plugins/filter/generate_data_set_name.yml | 22 ++- .../filters/test_generate_data_set_name.py | 161 +++++++++++++++++- 3 files changed, 237 insertions(+), 14 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index afb9093ca0..dd9630ef1a 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -15,45 +15,68 @@ from ansible.errors import AnsibleFilterError -def generate_data_set_name(value, generations=1): +def generate_data_set_name(value, middle_level_qualifier="", last_level_qualifier="", generations=1): """Filter to generate valid data set names Args: value {str} -- value of high level qualifier to use on data set names + middle_level_qualifier {str,optional} -- str of a possible qualifier + last_level_qualifier {str, optional} -- str of a possible qualifier generations {int, optional} -- number of dataset names to generate. Defaults to 1. Returns: list -- the total dataset names valid """ - if len(value) > 8: - raise AnsibleFilterError("The high level qualifier is too long for the data set name") + hlq = validate_qualifier(qualifier=value) + mlq = "" + llq = "" + + if bool(middle_level_qualifier): + mlq = validate_qualifier(qualifier=middle_level_qualifier) + + if bool(last_level_qualifier): + llq = validate_qualifier(qualifier=last_level_qualifier) if generations > 1: dataset_names = [] for generation in range(generations): - name = value + get_tmp_ds_name() + name = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, last_level_qualifier=llq) dataset_names.append(name) else: - dataset_names = value + get_tmp_ds_name() + dataset_names = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, last_level_qualifier=llq) return dataset_names -def get_tmp_ds_name(): +def get_tmp_ds_name(middle_level_qualifier="", last_level_qualifier=""): """Unify the random qualifiers generate in one name. + Args: + middle_level_qualifier {str,optional} -- valid str of a qualifier + last_level_qualifier {str, optional} -- valid str of a qualifier + Returns: str: valid data set name """ ds = "." - ds += "P" + get_random_q() + "." - ds += "T" + get_random_q() + "." - ds += "C" + get_random_q() + + if bool(middle_level_qualifier): + ds+= middle_level_qualifier + "." + else: + ds += "P" + get_random_q() + "." + + ds += "C" + get_random_q() + "." + + if bool(last_level_qualifier): + ds += last_level_qualifier + else: + ds += "T" + get_random_q() + return ds def get_random_q(): - """ Function or test to ensure random hlq of datasets""" + """Function or test to ensure random hlq of datasets""" # Generate the first random hlq of size pass as parameter letters = string.ascii_uppercase + string.digits random_q = ''.join(secrets.choice(letters)for iteration in range(7)) @@ -69,6 +92,31 @@ def get_random_q(): return random_q +def validate_qualifier(qualifier): + """Function to validate a qualifier with naming rules. + + Args: + qualifier (str): Str to validate as a Qualifier. + + Raises: + AnsibleFilterError: Error of the valid len on the qualifier. + AnsibleFilterError: Error on naming convention on the qualifier. + + Returns: + str: Valid qualifier in upper case. 
+ """ + qualifier = qualifier.upper() + + if len(qualifier) > 8: + raise AnsibleFilterError(f"The qualifier {qualifier} is too long for the data set name.") + + pattern = r'^[A-Z@#$][A-Z0-9@#$]{0,7}$' + if bool(re.fullmatch(pattern, qualifier)): + return qualifier + else: + raise AnsibleFilterError(f"The qualifier {qualifier} is not following the rules for naming conventions.") + + class FilterModule(object): """ Jinja2 filter for the returned list or string by the collection module. """ def filters(self): diff --git a/plugins/filter/generate_data_set_name.yml b/plugins/filter/generate_data_set_name.yml index f3cd1bad18..0853fc12aa 100644 --- a/plugins/filter/generate_data_set_name.yml +++ b/plugins/filter/generate_data_set_name.yml @@ -21,6 +21,16 @@ DOCUMENTATION: - High level qualifier. type: str required: true + middle_level_qualifier: + description: + - Possible valid middle level qualifier. + type: str + required: false + last_level_qualifier: + description: + - Possible valid last level qualifier. + type: str + required: false generations: description: - Number of data set names that you require to generate. @@ -30,8 +40,18 @@ DOCUMENTATION: EXAMPLES: | # Get only one data set name. clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name}}" + # Get a data set name with a specific middle level qualifier. + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM")}}" + # Get a data set name with a specific last level qualifier. + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM")}}" + # Get a data set name with a specific middle and last level qualifier. + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", last_level_qualifier="LLQADM")}}" # Get 10 data set names. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(10)}}" + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(generations=10)}}" + # Get 3 data set names with a specific last level qualifier. + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM", generations=3)}}" + # Get 5 data set names with specific middle level qualifier. 
+ clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", generations=5)}}" RETURN: _value: diff --git a/tests/functional/filters/test_generate_data_set_name.py b/tests/functional/filters/test_generate_data_set_name.py index 920c7b840a..d8e7a986be 100644 --- a/tests/functional/filters/test_generate_data_set_name.py +++ b/tests/functional/filters/test_generate_data_set_name.py @@ -27,18 +27,140 @@ def test_generate_data_set_name_filter(ansible_zos_module): assert result.get('msg') is not None assert input_string in result.get('msg') +def test_generate_data_set_name_mlq_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + mlq = "mlqadm" + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"middle_level_qualifier='{mlq}'" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert input_string in result.get('msg') + assert mlq.upper() in result.get('msg') + +def test_generate_data_set_name_mlq_multiple_generations_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + mlq = "mlqadm" + generations = 5 + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"middle_level_qualifier='{mlq}', " + f"generations={generations}" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert len(result.get('msg')) == generations + assert input_string in result.get('msg')[0] + assert mlq.upper() in result.get('msg')[0] + +def test_generate_data_set_name_llq_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + llq = "llqadm" + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"last_level_qualifier='{llq}'" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert input_string in result.get('msg') + assert llq.upper() in result.get('msg') + +def test_generate_data_set_name_llq_multiple_generations_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + llq = "llqadm" + generations = 5 + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"last_level_qualifier='{llq}', " + f"generations={generations}" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert len(result.get('msg')) == generations + assert input_string in result.get('msg')[0] + assert llq.upper() in result.get('msg')[0] + +def test_generate_data_set_name_mlq_llq_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + mlq = "mlqadm" + llq = "llqadm" + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"middle_level_qualifier='{mlq}', " + f"last_level_qualifier='{llq}') }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert input_string in result.get('msg') + assert mlq.upper() in result.get('msg') + assert llq.upper() in result.get('msg') + +def 
test_generate_data_set_name_mlq_llq_multiple_generations_filter(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + mlq = "mlqadm" + llq = "llqadm" + generations = 3 + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"middle_level_qualifier='{mlq}', " + f"last_level_qualifier='{llq}', " + f"generations={generations}" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('msg') is not None + assert len(result.get('msg')) == generations + assert input_string in result.get('msg')[0] + assert mlq.upper() in result.get('msg')[0] + assert llq.upper() in result.get('msg')[0] + def test_generate_data_set_name_filter_multiple_generations(ansible_zos_module): hosts = ansible_zos_module input_string = "OMVSADM" + generations = 10 hosts.all.set_fact(input_string=input_string) - results = hosts.all.debug(msg="{{ input_string | generate_data_set_name(10) }}") + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"generations={generations}" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) for result in results.contacted.values(): assert result.get('msg') is not None assert input_string in result.get('msg')[0] assert len(result.get('msg')) == 10 -def test_generate_data_set_name_filter_bad_hl1(ansible_zos_module): +def test_generate_data_set_name_filter_bad_hlq(ansible_zos_module): hosts = ansible_zos_module input_string = "OMVSADMONE" hosts.all.set_fact(input_string=input_string) @@ -46,4 +168,37 @@ def test_generate_data_set_name_filter_bad_hl1(ansible_zos_module): for result in results.contacted.values(): assert result.get('failed') is True - assert result.get('msg') == "The high level qualifier is too long for the data set name" + assert result.get('msg') == f"The qualifier {input_string} is too long for the data set name." + +def test_generate_data_set_name_filter_bad_mlq(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + mlq = "1mlq" + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"middle_level_qualifier='{mlq}'" + f") }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('failed') is True + assert result.get('msg') == f"The qualifier {mlq.upper()} is not following the rules for naming conventions." + +def test_generate_data_set_name_mlq_bad_llq(ansible_zos_module): + hosts = ansible_zos_module + input_string = "OMVSADM" + mlq = "mlqadm" + llq = "llqadmhere" + hosts.all.set_fact(input_string=input_string) + jinja_expr = ( + f"{{{{ input_string | generate_data_set_name(" + f"middle_level_qualifier='{mlq}', " + f"last_level_qualifier='{llq}') }}}}" + ) + results = hosts.all.debug(msg=jinja_expr) + + for result in results.contacted.values(): + assert result.get('failed') is True + assert result.get('msg') == f"The qualifier {llq.upper()} is too long for the data set name." 
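Before the next patch renames the generations parameter, it is worth pinning down the naming rules that PATCH 14 introduced in validate_qualifier. The following is a minimal standalone sketch, not part of the series: it inlines the regex from the patch and feeds it the sample qualifiers the new tests use.

import re

# Regex copied from validate_qualifier in PATCH 14: a qualifier starts with
# A-Z, @, # or $ and continues with up to seven of A-Z, 0-9, @, # or $.
QUALIFIER_PATTERN = r'^[A-Z@#$][A-Z0-9@#$]{0,7}$'

def is_valid_qualifier(qualifier):
    # The filter upper-cases before matching, so lowercase input is accepted.
    return re.fullmatch(QUALIFIER_PATTERN, qualifier.upper()) is not None

# Samples mirror the functional tests: the first two pass, the last two fail
# (leading digit, longer than eight characters).
for sample in ("OMVSADM", "mlqadm", "1mlq", "llqadmhere"):
    print(sample, is_valid_qualifier(sample))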
From 196f559dc645e761931b4c215f97f252a71de1b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?=
Date: Fri, 7 Nov 2025 12:11:55 -0600
Subject: [PATCH 15/31] Rename generations parameter to num_names

---
 plugins/filter/generate_data_set_name.py  | 11 +++++++----
 plugins/filter/generate_data_set_name.yml |  8 ++++----
 tests/dependencyfinder.py                 |  2 +-
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py
index dd9630ef1a..cc65a76013 100644
--- a/plugins/filter/generate_data_set_name.py
+++ b/plugins/filter/generate_data_set_name.py
@@ -15,18 +15,21 @@ from ansible.errors import AnsibleFilterError

-def generate_data_set_name(value, middle_level_qualifier="", last_level_qualifier="", generations=1):
+def generate_data_set_name(value, middle_level_qualifier="", last_level_qualifier="", num_names=1):
     """Filter to generate valid data set names

     Args:
         value {str} -- value of high level qualifier to use on data set names
         middle_level_qualifier {str,optional} -- str of a possible qualifier
         last_level_qualifier {str, optional} -- str of a possible qualifier
-        generations {int, optional} -- number of dataset names to generate. Defaults to 1.
+        num_names {int, optional} -- number of dataset names to generate. Defaults to 1.

     Returns:
         list -- the total dataset names valid
     """
+    if value is None or value == "":
+        raise AnsibleFilterError(f"Require to be provide a HLQ.")
+
     hlq = validate_qualifier(qualifier=value)
     mlq = ""
     llq = ""
@@ -37,9 +40,9 @@ def generate_data_set_name(value, middle_level_qualifie
     if bool(last_level_qualifier):
         llq = validate_qualifier(qualifier=last_level_qualifier)

-    if generations > 1:
+    if num_names > 1:
         dataset_names = []
-        for generation in range(generations):
+        for generation in range(num_names):
             name = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, last_level_qualifier=llq)
             dataset_names.append(name)
     else:
diff --git a/plugins/filter/generate_data_set_name.yml b/plugins/filter/generate_data_set_name.yml
index 0853fc12aa..15a36592be 100644
--- a/plugins/filter/generate_data_set_name.yml
+++ b/plugins/filter/generate_data_set_name.yml
@@ -31,7 +31,7 @@ DOCUMENTATION:
         - Possible valid last level qualifier.
       type: str
       required: false
-    generations:
+    num_names:
       description:
         - Number of data set names that you require to generate.
       type: int
@@ -47,11 +47,11 @@ EXAMPLES: |
   # Get a data set name with a specific middle and last level qualifier.
   clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", last_level_qualifier="LLQADM")}}"
   # Get 10 data set names.
-  clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(generations=10)}}"
+  clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10)}}"
   # Get 3 data set names with a specific last level qualifier.
-  clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM", generations=3)}}"
+  clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM", num_names=3)}}"
   # Get 5 data set names with specific middle level qualifier.
- clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", generations=5)}}" + clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", num_names=5)}}" RETURN: _value: diff --git a/tests/dependencyfinder.py b/tests/dependencyfinder.py index d82b7a9af9..2332bd3266 100755 --- a/tests/dependencyfinder.py +++ b/tests/dependencyfinder.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# Copyright (c) IBM Corporation 2020, 2022 +# Copyright (c) IBM Corporation 2020, 2025 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at From 1f8f9d00e2910a0f4a8d8083788197d83aee695d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Fri, 7 Nov 2025 12:12:11 -0600 Subject: [PATCH 16/31] Complete role --- .../filters/test_generate_data_set_name.py | 40 ++++++++++++++----- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/tests/functional/filters/test_generate_data_set_name.py b/tests/functional/filters/test_generate_data_set_name.py index d8e7a986be..da523e6f7a 100644 --- a/tests/functional/filters/test_generate_data_set_name.py +++ b/tests/functional/filters/test_generate_data_set_name.py @@ -48,19 +48,19 @@ def test_generate_data_set_name_mlq_multiple_generations_filter(ansible_zos_modu hosts = ansible_zos_module input_string = "OMVSADM" mlq = "mlqadm" - generations = 5 + num_names = 5 hosts.all.set_fact(input_string=input_string) jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" f"middle_level_qualifier='{mlq}', " - f"generations={generations}" + f"num_names={num_names}" f") }}}}" ) results = hosts.all.debug(msg=jinja_expr) for result in results.contacted.values(): assert result.get('msg') is not None - assert len(result.get('msg')) == generations + assert len(result.get('msg')) == num_names assert input_string in result.get('msg')[0] assert mlq.upper() in result.get('msg')[0] @@ -85,19 +85,19 @@ def test_generate_data_set_name_llq_multiple_generations_filter(ansible_zos_modu hosts = ansible_zos_module input_string = "OMVSADM" llq = "llqadm" - generations = 5 + num_names = 5 hosts.all.set_fact(input_string=input_string) jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" f"last_level_qualifier='{llq}', " - f"generations={generations}" + f"num_names={num_names}" f") }}}}" ) results = hosts.all.debug(msg=jinja_expr) for result in results.contacted.values(): assert result.get('msg') is not None - assert len(result.get('msg')) == generations + assert len(result.get('msg')) == num_names assert input_string in result.get('msg')[0] assert llq.upper() in result.get('msg')[0] @@ -125,20 +125,20 @@ def test_generate_data_set_name_mlq_llq_multiple_generations_filter(ansible_zos_ input_string = "OMVSADM" mlq = "mlqadm" llq = "llqadm" - generations = 3 + num_names = 3 hosts.all.set_fact(input_string=input_string) jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" f"middle_level_qualifier='{mlq}', " f"last_level_qualifier='{llq}', " - f"generations={generations}" + f"num_names={num_names}" f") }}}}" ) results = hosts.all.debug(msg=jinja_expr) for result in results.contacted.values(): assert result.get('msg') is not None - assert len(result.get('msg')) == generations + assert len(result.get('msg')) == num_names assert input_string in result.get('msg')[0] assert mlq.upper() in result.get('msg')[0] assert llq.upper() in result.get('msg')[0] 
@@ -146,11 +146,11 @@ def test_generate_data_set_name_mlq_llq_multiple_generations_filter(ansible_zos_
 def test_generate_data_set_name_filter_multiple_generations(ansible_zos_module):
     hosts = ansible_zos_module
     input_string = "OMVSADM"
-    generations = 10
+    num_names = 10
     hosts.all.set_fact(input_string=input_string)
     jinja_expr = (
         f"{{{{ input_string | generate_data_set_name("
-        f"generations={generations}"
+        f"num_names={num_names}"
         f") }}}}"
     )
     results = hosts.all.debug(msg=jinja_expr)
@@ -202,3 +202,21 @@ def test_generate_data_set_name_mlq_bad_llq(ansible_zos_module):
     for result in results.contacted.values():
         assert result.get('failed') is True
         assert result.get('msg') == f"The qualifier {llq.upper()} is too long for the data set name."
+
+def test_generate_data_set_name_filter_no_hlq(ansible_zos_module):
+    hosts = ansible_zos_module
+    input_string = "OMVSADMONE"
+    results = hosts.all.debug(msg="{{ generate_data_set_name }}")
+
+    for result in results.contacted.values():
+        assert result.get('failed') is True
+
+def test_generate_data_set_name_filter_empty_hlq(ansible_zos_module):
+    hosts = ansible_zos_module
+    input_string = ""
+    hosts.all.set_fact(input_string=input_string)
+    results = hosts.all.debug(msg="{{ input_string | generate_data_set_name }}")
+
+    for result in results.contacted.values():
+        assert result.get('failed') is True
+        assert result.get('msg') == "Require to be provide a HLQ."
\ No newline at end of file
From e7bd73f54aa089a8e0e4081e40f29805867c5420 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?=
Date: Fri, 7 Nov 2025 12:15:11 -0600
Subject: [PATCH 17/31] Fix sanity

---
 plugins/filter/generate_data_set_name.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py
index cc65a76013..9c327a9c05 100644
--- a/plugins/filter/generate_data_set_name.py
+++ b/plugins/filter/generate_data_set_name.py
@@ -64,7 +64,7 @@ def get_tmp_ds_name(middle_level_qualifier="", last_level_qualifier=""):
     ds = "."

     if bool(middle_level_qualifier):
-        ds+= middle_level_qualifier + "."
+        ds += middle_level_qualifier + "."
     else:
         ds += "P" + get_random_q() + "."
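At this point in the series the shape of a generated name is settled: the HLQ, then the middle-level qualifier (or a random qualifier prefixed with P), a random qualifier prefixed with C, and the last-level qualifier (or a random qualifier prefixed with T). An illustrative sketch of that layout, not part of the PR, with the seven-character random part mirroring get_random_q:

import secrets
import string

ALPHABET = string.ascii_uppercase + string.digits

def random_qualifier():
    # Seven random characters; the P/C/T prefix added by the caller keeps the
    # first character alphabetic, as the naming rules require.
    return ''.join(secrets.choice(ALPHABET) for _ in range(7))

def sketch_name(hlq, mlq="", llq=""):
    middle = mlq or "P" + random_qualifier()
    last = llq or "T" + random_qualifier()
    return ".".join([hlq, middle, "C" + random_qualifier(), last])

print(sketch_name("OMVSADM"))                              # fully random name
print(sketch_name("OMVSADM", mlq="MLQADM", llq="LLQADM"))  # fixed MLQ and LLQ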
From 65e19a3629e58782a74a1b2c5b23d1145db87f9d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?=
Date: Fri, 7 Nov 2025 12:22:58 -0600
Subject: [PATCH 18/31] Fix sanity f-string

---
 plugins/filter/generate_data_set_name.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py
index 9c327a9c05..b08a03e69b 100644
--- a/plugins/filter/generate_data_set_name.py
+++ b/plugins/filter/generate_data_set_name.py
@@ -28,7 +28,7 @@ def generate_data_set_name(value, middle_level_qualifie
         list -- the total dataset names valid
     """
     if value is None or value == "":
-        raise AnsibleFilterError(f"Require to be provide a HLQ.")
+        raise AnsibleFilterError("Require to be provide a HLQ.")

     hlq = validate_qualifier(qualifier=value)
     mlq = ""
     llq = ""
From 31f455b8b9bac0db0694b47b118ba33a07038b3a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= <68956970+AndreMarcel99@users.noreply.github.com>
Date: Tue, 11 Nov 2025 11:48:59 -0600
Subject: [PATCH 19/31] Update plugins/filter/generate_data_set_name.py

Co-authored-by: Alex Moreno
---
 plugins/filter/generate_data_set_name.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py
index b08a03e69b..991c15f582 100644
--- a/plugins/filter/generate_data_set_name.py
+++ b/plugins/filter/generate_data_set_name.py
@@ -28,7 +28,7 @@ def generate_data_set_name(value, middle_level_qualifie
         list -- the total dataset names valid
     """
     if value is None or value == "":
-        raise AnsibleFilterError("Require to be provide a HLQ.")
+        raise AnsibleFilterError("A High-Level Qualifier is required.")

     hlq = validate_qualifier(qualifier=value)
     mlq = ""
     llq = ""
From bfb4709714da6d8885e473beab9b5de4ddf8d95e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= <68956970+AndreMarcel99@users.noreply.github.com>
Date: Tue, 11 Nov 2025 11:49:11 -0600
Subject: [PATCH 20/31] Update plugins/filter/generate_data_set_name.py

Co-authored-by: Alex Moreno
---
 plugins/filter/generate_data_set_name.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py
index 991c15f582..dcf55f69ee 100644
--- a/plugins/filter/generate_data_set_name.py
+++ b/plugins/filter/generate_data_set_name.py
@@ -52,7 +52,7 @@ def generate_data_set_name(value, middle_level_qualifie

 def get_tmp_ds_name(middle_level_qualifier="", last_level_qualifier=""):
-    """Unify the random qualifiers generate in one name.
+    """Unify the random qualifiers generated into one name.
Args: middle_level_qualifier {str,optional} -- valid str of a qualifier From 31b0a9d0fa1ab51538d0e56f6b27000116047302 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Tue, 11 Nov 2025 12:17:33 -0600 Subject: [PATCH 21/31] Update documentation --- plugins/filter/generate_data_set_name.py | 66 +++++++++++++++++++++++ plugins/filter/generate_data_set_name.yml | 59 -------------------- 2 files changed, 66 insertions(+), 59 deletions(-) delete mode 100644 plugins/filter/generate_data_set_name.yml diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index dcf55f69ee..9d15f69ac2 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -9,6 +9,72 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = r''' +name: generate_data_set_name +author: Marcel Gutierrez (@AndreMarcel99) +version_added: "2.0.0" +short_description: Filter returned valid data set names +description: + - Provide a valid temporary data set name. +options: + value: + description: + - High level qualifier. + type: str + required: true + middle_level_qualifier: + description: + - Possible valid middle level qualifier. + type: str + required: false + last_level_qualifier: + description: + - Possible valid last level qualifier. + type: str + required: false + num_names: + description: + - Number of data set names that you require to generate. + type: int + required: false +''' + +EXAMPLES = r''' +- name: Filter to get one data set name + set_fact: + data_set_name: {{ hlq | ibm.ibm_zos_core.generate_data_set_name}}" +- name: Filter to get a data set name with a specific middle level qualifier + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM")}}" +- name: Filter to generate a data set name with a specific last level qualifier. + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM")}}" +- name: Filter to generate a data set name with a specific middle and last level qualifier. + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", last_level_qualifier="LLQADM")}}" +- name: Filter to generate 10 data set names + set_fact: + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10)}}" +- name: Filter to generate 3 data set names with a specific last level qualifier. + set_fact: + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM", num_names=3)}}" +- name: Filter to generate 5 data set names with a specific middle level qualifier. + set_fact: + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", num_names=5)}}" +''' + +RETURN = r''' + _value: + description: Name or names generated by the filter. 
+ type: list + elements: str +''' + + import secrets import string import re diff --git a/plugins/filter/generate_data_set_name.yml b/plugins/filter/generate_data_set_name.yml deleted file mode 100644 index 15a36592be..0000000000 --- a/plugins/filter/generate_data_set_name.yml +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) IBM Corporation 2025 -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DOCUMENTATION: - name: generate_data_set_name - author: Marcel Gutierrez (@AndreMarcel99) - version_added: "2.0.0" - short_description: Filter returned valid data set names - description: - - Provide a valid temporary data set name. - options: - value: - description: - - High level qualifier. - type: str - required: true - middle_level_qualifier: - description: - - Possible valid middle level qualifier. - type: str - required: false - last_level_qualifier: - description: - - Possible valid last level qualifier. - type: str - required: false - num_names: - description: - - Number of data set names that you require to generate. - type: int - required: false - -EXAMPLES: | - # Get only one data set name. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name}}" - # Get a data set name with a specific middle level qualifier. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM")}}" - # Get a data set name with a specific last level qualifier. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM")}}" - # Get a data set name with a specific middle and last level qualifier. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", last_level_qualifier="LLQADM")}}" - # Get 10 data set names. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10)}}" - # Get 3 data set names with a specific last level qualifier. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM", num_names=3)}}" - # Get 5 data set names with specific middle level qualifier. - clean_output: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", num_names=5)}}" - -RETURN: - _value: - description: Name or names generated by the filter. 
- type: list \ No newline at end of file From 8ddc14599d492340101d4d5c039494f4573b684e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Tue, 11 Nov 2025 12:24:22 -0600 Subject: [PATCH 22/31] Fix Examples documentation --- plugins/filter/generate_data_set_name.py | 43 ++++++++++++++---------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index 9d15f69ac2..6bb14543be 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -45,28 +45,35 @@ EXAMPLES = r''' - name: Filter to get one data set name - set_fact: - data_set_name: {{ hlq | ibm.ibm_zos_core.generate_data_set_name}}" + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name }}" + - name: Filter to get a data set name with a specific middle level qualifier - set_fact: - data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM")}}" -- name: Filter to generate a data set name with a specific last level qualifier. - set_fact: - data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM")}}" -- name: Filter to generate a data set name with a specific middle and last level qualifier. - set_fact: - data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", last_level_qualifier="LLQADM")}}" + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM') }}" + +- name: Filter to generate a data set name with a specific last level qualifier + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier='LLQADM') }}" + +- name: Filter to generate a data set name with a specific middle and last level qualifier + set_fact: + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', last_level_qualifier='LLQADM') }}" + - name: Filter to generate 10 data set names - set_fact: - data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10)}}" -- name: Filter to generate 3 data set names with a specific last level qualifier. - set_fact: - data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier="LLQADM", num_names=3)}}" -- name: Filter to generate 5 data set names with a specific middle level qualifier. - set_fact: - data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier="MLQADM", num_names=5)}}" + set_fact: + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10) }}" + +- name: Filter to generate 3 data set names with a specific last level qualifier + set_fact: + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier='LLQADM', num_names=3) }}" + +- name: Filter to generate 5 data set names with a specific middle level qualifier + set_fact: + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', num_names=5) }}" ''' + RETURN = r''' _value: description: Name or names generated by the filter. 
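With PATCH 22 the embedded DOCUMENTATION block is the single source of usage examples. Outside a playbook the filter can also be exercised through a plain Jinja2 environment, much as the placeholder test in PATCH 01 did. A sketch under two assumptions: plugins/filter is on sys.path so the module imports directly, and Ansible is installed (the plugin imports AnsibleFilterError).

import jinja2

# Hypothetical direct import; inside Ansible this registration happens
# automatically when the collection's filter plugins are loaded.
from generate_data_set_name import FilterModule

env = jinja2.Environment()
env.filters.update(FilterModule().filters())

template = env.from_string("{{ 'OMVSADM' | generate_data_set_name(num_names=2) }}")
print(template.render())  # e.g. ['OMVSADM.P1A2B3C.C4D5E6F.T7G8H9J', ...]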
From 5be31c4388569529e0af2dcf3dae8af5df6dfd4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Tue, 11 Nov 2025 12:25:37 -0600 Subject: [PATCH 23/31] Avoid extra space --- plugins/filter/generate_data_set_name.py | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index 6bb14543be..13aa56e29f 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -73,7 +73,6 @@ data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', num_names=5) }}" ''' - RETURN = r''' _value: description: Name or names generated by the filter. From 13cc71b6918414c2ad0b5a8808f2e3e908020526 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Tue, 11 Nov 2025 13:42:18 -0600 Subject: [PATCH 24/31] Added documentation for filters --- ansible.cfg | 3 +- docs/Makefile | 38 + docs/source/filters/filter_wtor_messages.rst | 95 + .../source/filters/generate_data_set_name.rst | 113 + docs/source/modules/zos_apf.rst | 332 --- docs/source/modules/zos_archive.rst | 600 ----- docs/source/modules/zos_backup_restore.rst | 443 ---- docs/source/modules/zos_blockinfile.rst | 412 ---- docs/source/modules/zos_copy.rst | 1187 ---------- docs/source/modules/zos_data_set.rst | 886 ------- docs/source/modules/zos_encode.rst | 335 --- docs/source/modules/zos_fetch.rst | 359 --- docs/source/modules/zos_find.rst | 398 ---- docs/source/modules/zos_gather_facts.rst | 138 -- docs/source/modules/zos_job_output.rst | 523 ----- docs/source/modules/zos_job_query.rst | 389 --- docs/source/modules/zos_job_submit.rst | 895 ------- docs/source/modules/zos_lineinfile.rst | 351 --- docs/source/modules/zos_mount.rst | 628 ----- docs/source/modules/zos_mvs_raw.rst | 2083 ----------------- docs/source/modules/zos_operator.rst | 212 -- .../modules/zos_operator_action_query.rst | 259 -- docs/source/modules/zos_ping.rst | 88 - docs/source/modules/zos_replace.rst | 304 --- docs/source/modules/zos_script.rst | 419 ---- docs/source/modules/zos_started_task.rst | 530 ----- docs/source/modules/zos_stat.rst | 1326 ----------- docs/source/modules/zos_tso_command.rst | 171 -- docs/source/modules/zos_unarchive.rst | 555 ----- docs/source/modules/zos_volume_init.rst | 271 --- docs/source/modules/zos_zfs_resize.rst | 314 --- docs/source/roles/job_status.rst | 77 - 32 files changed, 247 insertions(+), 14487 deletions(-) create mode 100644 docs/source/filters/filter_wtor_messages.rst create mode 100644 docs/source/filters/generate_data_set_name.rst delete mode 100644 docs/source/modules/zos_apf.rst delete mode 100644 docs/source/modules/zos_archive.rst delete mode 100644 docs/source/modules/zos_backup_restore.rst delete mode 100644 docs/source/modules/zos_blockinfile.rst delete mode 100644 docs/source/modules/zos_copy.rst delete mode 100644 docs/source/modules/zos_data_set.rst delete mode 100644 docs/source/modules/zos_encode.rst delete mode 100644 docs/source/modules/zos_fetch.rst delete mode 100644 docs/source/modules/zos_find.rst delete mode 100644 docs/source/modules/zos_gather_facts.rst delete mode 100644 docs/source/modules/zos_job_output.rst delete mode 100644 docs/source/modules/zos_job_query.rst delete mode 100644 docs/source/modules/zos_job_submit.rst delete mode 100644 docs/source/modules/zos_lineinfile.rst delete mode 100644 docs/source/modules/zos_mount.rst delete mode 100644 docs/source/modules/zos_mvs_raw.rst delete mode 100644 
docs/source/modules/zos_operator.rst
 delete mode 100644 docs/source/modules/zos_operator_action_query.rst
 delete mode 100644 docs/source/modules/zos_ping.rst
 delete mode 100644 docs/source/modules/zos_replace.rst
 delete mode 100644 docs/source/modules/zos_script.rst
 delete mode 100644 docs/source/modules/zos_started_task.rst
 delete mode 100644 docs/source/modules/zos_stat.rst
 delete mode 100644 docs/source/modules/zos_tso_command.rst
 delete mode 100644 docs/source/modules/zos_unarchive.rst
 delete mode 100644 docs/source/modules/zos_volume_init.rst
 delete mode 100644 docs/source/modules/zos_zfs_resize.rst
 delete mode 100644 docs/source/roles/job_status.rst

diff --git a/ansible.cfg b/ansible.cfg
index b091247d73..b452495ff1 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -1,5 +1,5 @@
 ################################################################################
-# Copyright (c) IBM Corporation 2020, 2025
+# Copyright (c) IBM Corporation 2020, 2021
 ################################################################################

 ################################################################################
@@ -29,7 +29,6 @@
 [defaults]
 forks = 25
 action_plugins=~/.ansible/collections/ansible_collections/ibm/ibm_zos_core/plugins/action
-filter_plugins=~/.ansible/collections/ansible_collections/ibm/ibm_zos_core/plugins/filter
 # remote_tmp = /u/ansible/tmp
 # remote_port = 22
 # debug = True
diff --git a/docs/Makefile b/docs/Makefile
index cbc1f2b419..4b7f3d687d 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -81,6 +81,11 @@ clean:
 		rm -rf source/roles; \
 	fi

+	@if test -d source/filters; then \
+		echo "Deleting filters '$(ROOT_DIR)/source/filters'."; \
+		rm -rf source/filters; \
+	fi
+
 	@if test -d ../plugins/modules/rexx_module_doc; then \
 		echo "Deleting directory '../plugins/modules/rexx_module_doc'."; \
 		rm -rf ../plugins/modules/rexx_module_doc; \
@@ -93,6 +98,39 @@ clean:

 	@echo "Completed cleanup, run 'make module-doc' or 'make role-doc'."

+filter-doc:
+	@echo $(line_header)
+	@echo "Running Target filter-doc"
+	@echo $(line_header)
+
+	@if ! test -d build; then \
+		mkdir build; \
+		echo "Make $(ROOT_DIR)/build directory for Sphinx generated HTML."; \
+	fi
+
+	@if ! test -d source/filters; then \
+		mkdir -p source/filters; \
+		echo "Make $(ROOT_DIR)/source/filters directory for Sphinx generated HTML."; \
+	fi
+
+	@if test -e ../plugins/filter/__init__.py; then \
+		echo "Renaming '../plugins/filter/__init__.py' to '../plugins/filter/__init__.py.skip' to avoid reading an empty Python file."; \
+		mv ../plugins/filter/__init__.py ../plugins/filter/__init__.py.skip; \
+	fi
+
+	@echo "Generating ReStructuredText for all ansible filters found at '../plugins/filter/' to 'source/filters'."
+	@ansible-doc-extractor --template templates/module.rst.j2 source/filters ../plugins/filter/*.py
+
+
+	@if test -e ../plugins/filter/__init__.py.skip; then \
+		echo "Renaming '../plugins/filter/__init__.py.skip' back to '../plugins/filter/__init__.py'."; \
+		mv -f ../plugins/filter/__init__.py.skip ../plugins/filter/__init__.py; \
+	fi
+
+	@echo $(line_header)
+	@echo "Completed ReStructuredText generation for filters; next run 'make html'"
+	@echo $(line_header)
+
 role-doc:
 	@echo $(line_header)
 	@echo "Running Target role-doc"
diff --git a/docs/source/filters/filter_wtor_messages.rst b/docs/source/filters/filter_wtor_messages.rst
new file mode 100644
index 0000000000..cd35fff071
--- /dev/null
+++ b/docs/source/filters/filter_wtor_messages.rst
@@ -0,0 +1,95 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/filter/filter_wtor_messages.py
+
+.. _filter_wtor_messages_module:
+
+
+filter_wtor_messages -- Filter a list of WTOR messages
+======================================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Filter a list of WTOR (write to operator with reply) messages found by module zos_operator_action_query.
+- Filter using a string or regular expression.
+
+
+
+
+
+Parameters
+----------
+
+
+wtor_response
+  A list containing the response property `message_text`, provided by the module zos_operator_action_query.
+
+  The list can be the outstanding messages found in the module's response under the `actions` property or the entire module response.
+
+  | **required**: True
+  | **type**: list
+
+
+text
+  String of text to match or a regular expression to use as filter criteria.
+
+  | **required**: True
+  | **type**: str
+
+
+ignore_case
+  Whether the filter should ignore case when performing the match.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Filter actionable messages that match 'IEE094D SPECIFY OPERAND' and if so, set is_specify_operand = true.
+     set_fact:
+       is_specify_operand: "{{ result | ibm.ibm_zos_core.filter_wtor_messages('IEE094D SPECIFY OPERAND') }}"
+     when: result is defined and not result.failed
+
+   - name: Evaluate if there are any existing dump messages matching 'IEE094D SPECIFY OPERAND'
+     assert:
+       that:
+         - is_specify_operand is defined
+         - is_specify_operand | length > 0
+       success_msg: "Found 'IEE094D SPECIFY OPERAND' message."
+       fail_msg: "Did not find 'IEE094D SPECIFY OPERAND' message."
+
+
+
+
+
+
+
+
+
+
+Return Values
+-------------
+
+
+_value
+  A list containing dictionaries matching the WTOR.
+
+  | **type**: list
+  | **elements**: dict
+
diff --git a/docs/source/filters/generate_data_set_name.rst b/docs/source/filters/generate_data_set_name.rst
new file mode 100644
index 0000000000..f3d24ce5bf
--- /dev/null
+++ b/docs/source/filters/generate_data_set_name.rst
@@ -0,0 +1,113 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/filter/generate_data_set_name.py
+
+.. _generate_data_set_name_module:
+
+
+generate_data_set_name -- Generate valid data set names
+========================================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Provide one or more valid temporary data set names.
+
+
+
+
+
+Parameters
+----------
+
+
+value
+  High level qualifier to use for the generated data set names.
+
+  | **required**: True
+  | **type**: str
+
+
+middle_level_qualifier
+  Optional middle level qualifier to use in the generated data set names.
+
+  | **required**: False
+  | **type**: str
+
+
+last_level_qualifier
+  Optional last level qualifier to use in the generated data set names.
+
+  | **required**: False
+  | **type**: str
+
+
+num_names
+  Number of data set names to generate.
+
+  | **required**: False
+  | **type**: int
+
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Filter to get one data set name
+     set_fact:
+       data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name }}"
+
+   - name: Filter to get a data set name with a specific middle level qualifier
+     set_fact:
+       data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM') }}"
+
+   - name: Filter to generate a data set name with a specific last level qualifier
+     set_fact:
+       data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier='LLQADM') }}"
+
+   - name: Filter to generate a data set name with a specific middle and last level qualifier
+     set_fact:
+       data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', last_level_qualifier='LLQADM') }}"
+
+   - name: Filter to generate 10 data set names
+     set_fact:
+       data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10) }}"
+
+   - name: Filter to generate 3 data set names with a specific last level qualifier
+     set_fact:
+       data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier='LLQADM', num_names=3) }}"
+
+   - name: Filter to generate 5 data set names with a specific middle level qualifier
+     set_fact:
+       data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', num_names=5) }}"
+
+
+
+
+
+
+
+
+
+
+Return Values
+-------------
+
+
+_value
+  Name or names generated by the filter.
+
+  | **type**: list
+  | **elements**: str
+
diff --git a/docs/source/modules/zos_apf.rst b/docs/source/modules/zos_apf.rst
deleted file mode 100644
index 215de08519..0000000000
--- a/docs/source/modules/zos_apf.rst
+++ /dev/null
@@ -1,332 +0,0 @@
-
-:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_apf.py
-
-.. _zos_apf_module:
-
-
-zos_apf -- Add or remove libraries to Authorized Program Facility (APF)
-=======================================================================
-
-
-
-.. contents::
-   :local:
-   :depth: 1
-
-
-Synopsis
---------
-- Adds or removes libraries to Authorized Program Facility (APF).
-- Manages APF statement persistent entries to a data set or data set member.
-- Changes APF list format to "DYNAMIC" or "STATIC".
-- Gets the current APF list entries.
-
-
-
-
-
-Parameters
-----------
-
-
-library
-  The library name to be added or removed from the APF list.
-
-  | **required**: False
-  | **type**: str
-
-
-state
-  Ensure that the library is added ``state=present`` or removed ``state=absent``.
-
-  The APF list format has to be "DYNAMIC".
-
-  | **required**: False
-  | **type**: str
-  | **default**: present
-  | **choices**: absent, present
-
-
-force_dynamic
-  Will force the APF list format to "DYNAMIC" before adding or removing libraries.
-
-  If the format is "STATIC", the format will be changed to "DYNAMIC".
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-volume
-  The identifier for the volume containing the library specified in the ``library`` parameter. The values must be one the following.
-
-  1. The volume serial number.
-
-  2. Six asterisks ``******``, indicating that the system must use the volume serial number of the current system residence (SYSRES) volume.
-
-  3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog.
-
-  If ``volume`` is not specified, ``library`` has to be cataloged.
-
-  | **required**: False
-  | **type**: str
-
-
-sms
-  Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library.
-
-  If ``sms=True``, ``volume`` value will be ignored.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-operation
-  Change APF list format to "DYNAMIC" ``operation=set_dynamic`` or "STATIC" ``operation=set_static``
-
-  Display APF list current format ``operation=check_format``
-
-  Display APF list entries when ``operation=list`` ``library``, ``volume`` and ``sms`` will be used as filters.
-
-  If ``operation`` is not set, add or remove operation will be ignored.
-
-  | **required**: False
-  | **type**: str
-  | **choices**: set_dynamic, set_static, check_format, list
-
-
-tmp_hlq
-  Override the default high level qualifier (HLQ) for temporary and backup datasets.
-
-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.
-
-  | **required**: False
-  | **type**: str
-
-
-persistent
-  Add/remove persistent entries to or from *data_set_name*
-
-  ``library`` will not be persisted or removed if ``persistent=None``
-
-  | **required**: False
-  | **type**: dict
-
-
-  data_set_name
-    The data set name used for persisting or removing a ``library`` from the APF list.
-
-    | **required**: True
-    | **type**: str
-
-
-  marker
-    The marker line template.
-
-    ``{mark}`` will be replaced with "BEGIN" and "END".
-
-    Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs.
-
-    ``{mark}`` length may not exceed 72 characters.
-
-    The timestamp (<timestamp>) used in the default marker follows the '+%Y%m%d-%H%M%S' date format
-
-    | **required**: False
-    | **type**: str
-    | **default**: /* {mark} ANSIBLE MANAGED BLOCK <timestamp> \*/
-
-
-  backup
-    Creates a backup file or backup data set for *data_set_name*, including the timestamp information to ensure that you retrieve the original APF list defined in *data_set_name*".
-
-    *backup_name* can be used to specify a backup file name if *backup=true*.
-
-    The backup file name will be return on either success or failure of module execution such that data can be retrieved.
-
-    | **required**: False
-    | **type**: bool
-    | **default**: False
-
-
-  backup_name
-    Specify the USS file name or data set name for the destination backup.
-
-    If the source *data_set_name* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name.
-
-    If the source is an MVS data set, the backup_name must be an MVS data set name.
-
-    If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``.
-
-    If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it.
- - | **required**: False - | **type**: str - - - -batch - A list of dictionaries for adding or removing libraries. - - This is mutually exclusive with ``library``, ``volume``, ``sms`` - - Can be used with ``persistent`` - - | **required**: False - | **type**: list - | **elements**: dict - - - library - The library name to be added or removed from the APF list. - - | **required**: True - | **type**: str - - - volume - The identifier for the volume containing the library specified on the ``library`` parameter. The values must be one of the following. - - 1. The volume serial number - - 2. Six asterisks ``******``, indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. - - 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. - - If ``volume`` is not specified, ``library`` has to be cataloged. - - | **required**: False - | **type**: str - - - sms - Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. - - If true ``volume`` will be ignored. - - | **required**: False - | **type**: bool - | **default**: False - - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Add a library to the APF list - zos_apf: - library: SOME.SEQUENTIAL.DATASET - volume: T12345 - - name: Add a library (cataloged) to the APF list and persistence - zos_apf: - library: SOME.SEQUENTIAL.DATASET - force_dynamic: true - persistent: - data_set_name: SOME.PARTITIONED.DATASET(MEM) - - name: Remove a library from the APF list and persistence - zos_apf: - state: absent - library: SOME.SEQUENTIAL.DATASET - volume: T12345 - persistent: - data_set_name: SOME.PARTITIONED.DATASET(MEM) - - name: Batch libraries with custom marker, persistence for the APF list - zos_apf: - persistent: - data_set_name: "SOME.PARTITIONED.DATASET(MEM)" - marker: "/* {mark} PROG001 USR0010 */" - batch: - - library: SOME.SEQ.DS1 - - library: SOME.SEQ.DS2 - sms: true - - library: SOME.SEQ.DS3 - volume: T12345 - - name: Print the APF list matching library pattern or volume serial number - zos_apf: - operation: list - library: SOME.SEQ.* - volume: T12345 - - name: Set the APF list format to STATIC - zos_apf: - operation: set_static - - - - -Notes ------ - -.. note:: - It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF® FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - - To add or delete the APF list entry for library libname, you must have UPDATE authority to the RACF® FACILITY resource class entity CSVAPF.libname, or there must be no FACILITY class profile that protects that entity. 
- - To change the format of the APF list to dynamic, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.DYNAMIC, or there must be no FACILITY class profile that protects that entity. - - To change the format of the APF list back to static, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.STATIC, or there must be no FACILITY class profile that protects that entity. - - - - - - - -Return Values -------------- - - -stdout - The stdout from ZOAU command apfadm. Output varies based on the type of operation. - - state> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm - - operation> stdout of operation options list> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] set_dynamic> Set to DYNAMIC set_static> Set to STATIC check_format> DYNAMIC or STATIC - - | **returned**: always - | **type**: str - -stderr - The error messages from ZOAU command apfadm - - | **returned**: always - | **type**: str - | **sample**: BGYSC1310E ADD Error: Dataset COMMON.LINKLIB volume COMN01 is already present in APF list. - -rc - The return code from ZOAU command apfadm - - | **returned**: always - | **type**: int - -msg - The module messages - - | **returned**: failure - | **type**: str - | **sample**: Parameter verification failed - -backup_name - Name of the backup file or data set that was created. - - | **returned**: if backup=true, always - | **type**: str - diff --git a/docs/source/modules/zos_archive.rst b/docs/source/modules/zos_archive.rst deleted file mode 100644 index 2a51654019..0000000000 --- a/docs/source/modules/zos_archive.rst +++ /dev/null @@ -1,600 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_archive.py - -.. _zos_archive_module: - - -zos_archive -- Archive files and data sets on z/OS. -=================================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Create or extend an archive on a remote z/OS system. -- Sources for archiving must be on the remote z/OS system. -- Supported sources are USS (UNIX System Services) or z/OS data sets. -- The archive remains on the remote z/OS system. -- For supported archive formats, see option ``format``. - - - - - -Parameters ----------- - - -src - List of names or globs of UNIX System Services (USS) files, PS (sequential data sets), PDS, PDSE to compress or archive. - - USS file paths should be absolute paths. - - GDS relative notation is supported. - - MVS data sets supported types are: ``SEQ``, ``PDS``, ``PDSE``. - - VSAMs are not supported. - - GDS relative names are supported. e.g. *USER.GDG(-1*). - - | **required**: True - | **type**: list - | **elements**: str - - -format - The compression type and corresponding options to use when archiving data. - - | **required**: False - | **type**: dict - - - name - The compression format to use. - - | **required**: False - | **type**: str - | **default**: gz - | **choices**: bz2, gz, tar, zip, terse, xmit, pax - - - format_options - Options specific to a compression format. - - | **required**: False - | **type**: dict - - - terse_pack - Compression option for use with the terse format, *name=terse*. - - Pack will compress records in a data set so that the output results in lossless data compression. 
- - Spack will compress records in a data set so the output results in complex data compression. - - Spack will produce smaller output and take approximately 3 times longer than pack compression. - - | **required**: False - | **type**: str - | **choices**: pack, spack - - - xmit_log_data_set - Provide the name of a data set to store xmit log output. - - If the data set provided does not exist, the program will create it. - - If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB - - When providing the *xmit_log_data_set* name, ensure there is adequate space. - - | **required**: False - | **type**: str - - - use_adrdssu - If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using ``xmit`` or ``terse``. - - | **required**: False - | **type**: bool - | **default**: False - - - - -dest - The remote absolute path or data set where the archive should be created. - - *dest* can be a USS file or MVS data set name. - - If *dest* has missing parent directories, they will be created. - - If *dest* is a nonexistent USS file, it will be created. - - If *dest* is an existing file or data set and *force=true*, the existing *dest* will be deleted and recreated with attributes defined in the *dest_data_set* option or computed by the module. - - If *dest* is an existing file or data set and *force=false* or not specified, the module exits with a note to the user. - - Destination data set attributes can be set using *dest_data_set*. - - Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the *dest_data_set* option will improve performance. - - GDS relative names are supported. e.g. *USER.GDG(-1*). - - | **required**: True - | **type**: str - - -exclude - Remote absolute path, glob, or list of paths, globs, data set name patterns or generation data sets (GDSs) in relative notation for the file, files or data sets to exclude from src list and glob expansion. - - Patterns (wildcards) can contain one of the following, `?`, `*`. - - * matches everything. - - ? matches any single character. - - GDS relative names are supported. e.g. *USER.GDG(-1*). - - | **required**: False - | **type**: list - | **elements**: str - - -group - Name of the group that will own the archive file. - - When left unspecified, it uses the current group of the current use unless you are root, in which case it can preserve the previous ownership. - - This option is only applicable if ``dest`` is USS, otherwise ignored. - - | **required**: False - | **type**: str - - -mode - The permission of the destination archive file. - - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. - - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. - - The mode may also be specified as a symbolic mode (for example, 'u+rwx' or 'u=rw,g=r,o=r') or a special string 'preserve'. 
- - *mode=preserve* means that the file will be given the same permissions as the src file. - - | **required**: False - | **type**: str - - -owner - Name of the user that should own the archive file, as would be passed to the chown command. - - When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - - This option is only applicable if ``dest`` is USS, otherwise ignored. - - | **required**: False - | **type**: str - - -remove - Remove any added source files , trees or data sets after module `zos_archive <./zos_archive.html>`_ adds them to the archive. Source files, trees and data sets are identified with option *src*. - - | **required**: False - | **type**: bool - | **default**: False - - -dest_data_set - Data set attributes to customize a ``dest`` data set to be archived into. - - | **required**: False - | **type**: dict - - - name - Desired name for destination dataset. - - | **required**: False - | **type**: str - - - type - Organization of the destination - - | **required**: False - | **type**: str - | **default**: seq - | **choices**: seq - - - space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - - - space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - - - space_type - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. - - | **required**: False - | **type**: str - | **choices**: k, m, g, cyl, trk - - - record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``FB``) - - Choices are case-sensitive. - - | **required**: False - | **type**: str - | **choices**: fb, vb, fba, vba, u - - - record_length - The length of each record in the data set, in bytes. - - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - - | **required**: False - | **type**: int - - - block_size - The block size to use for the data set. - - | **required**: False - | **type**: int - - - directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - - sms_storage_class - The storage class for an SMS-managed dataset. - - Required for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_data_class - The data class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_management_class - The management class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. 
- - | **required**: False - | **type**: str - - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary data sets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - -force - If set to ``true`` and the remote file or data set ``dest`` will be deleted. Otherwise it will be created with the ``dest_data_set`` attributes or default values if ``dest_data_set`` is not specified. - - If set to ``false``, the file or data set will only be copied if the destination does not exist. - - If set to ``false`` and destination exists, the module exits with a note to the user. - - | **required**: False - | **type**: bool - | **default**: False - - -encoding - Specifies the character encoding conversion to be applied to the source files before archiving. - - Supported character sets rely on the charset conversion utility ``iconv`` version the most common character sets are supported. - - After conversion the files are stored in same location and name as src and the same src is taken in consideration for archive. - - Source files will be converted to the new encoding and will not be restored to their original encoding. - - If encoding fails for any file in a set of multiple files, an exception will be raised and archiving will be skipped. - - The original files in ``src`` will be converted. The module will revert the encoding conversion after a successful archive, but no backup will be created. If you need to encode using a backup and then archive take a look at `zos_encode <./zos_encode.html>`_ module. - - | **required**: False - | **type**: dict - - - from - The character set of the source *src*. - - | **required**: False - | **type**: str - - - to - The destination *dest* character set for the files to be written as. - - | **required**: False - | **type**: str - - - skip_encoding - List of names to skip encoding before archiving. This is only used if *encoding* is set, otherwise is ignored. - - | **required**: False - | **type**: list - | **elements**: str - - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. 
code-block:: yaml+jinja - - - # Simple archive - - name: Archive file into a tar - zos_archive: - src: /tmp/archive/foo.txt - dest: /tmp/archive/foo_archive_test.tar - format: - name: tar - - # Archive multiple files - - name: Archive list of files into a zip - zos_archive: - src: - - /tmp/archive/foo.txt - - /tmp/archive/bar.txt - dest: /tmp/archive/foo_bar_archive_test.zip - format: - name: zip - - # Archive one data set into terse - - name: Archive data set into a terse - zos_archive: - src: "USER.ARCHIVE.TEST" - dest: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - - # Use terse with different options - - name: Archive data set into a terse, specify pack algorithm and use adrdssu - zos_archive: - src: "USER.ARCHIVE.TEST" - dest: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - format_options: - terse_pack: "spack" - use_adrdssu: true - - # Use a pattern to store - - name: Archive data set pattern using xmit - zos_archive: - src: "USER.ARCHIVE.*" - exclude_sources: "USER.ARCHIVE.EXCLUDE.*" - dest: "USER.ARCHIVE.RESULT.XMIT" - format: - name: xmit - - - name: Archive multiple GDSs into a terse - zos_archive: - src: - - "USER.GDG(0)" - - "USER.GDG(-1)" - - "USER.GDG(-2)" - dest: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - format_options: - use_adrdssu: true - - - name: Archive multiple data sets into a new GDS - zos_archive: - src: "USER.ARCHIVE.*" - dest: "USER.GDG(+1)" - format: - name: terse - format_options: - use_adrdssu: true - - - name: Encode the source data set into Latin-1 before archiving into a terse data set - zos_archive: - src: "USER.ARCHIVE.TEST" - dest: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Encode and archive multiple data sets but skip encoding for a few. - zos_archive: - src: - - "USER.ARCHIVE1.TEST" - - "USER.ARCHIVE2.TEST" - dest: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - format_options: - use_adrdssu: true - encoding: - from: IBM-1047 - to: ISO8859-1 - skip_encoding: - - "USER.ARCHIVE2.TEST" - - - - -Notes ------ - -.. note:: - This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos_fetch to retrieve to the controller and then zos_copy or zos_unarchive for copying to a remote or send to the remote and then unpack the archive respectively. - - When packing and using ``use_adrdssu`` flag the module will take up to two times the space indicated in ``dest_data_set``. - - tar, zip, bz2 and pax are archived using python ``tarfile`` library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format. - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_fetch_module` - - :ref:`zos_unarchive_module` - - - - -Return Values -------------- - - -state - The state of the input ``src``. - - ``absent`` when the source files or data sets were removed. - - ``present`` when the source files or data sets were not removed. - - ``incomplete`` when ``remove`` was true and the source files or data sets were not removed. - - | **returned**: always - | **type**: str - -dest_state - The state of the *dest* file or data set. - - ``absent`` when the file does not exist. - - ``archive`` when the file is an archive. - - ``compress`` when the file is compressed, but not an archive. - - ``incomplete`` when the file is an archive, but some files under *src* were not found. 
- - | **returned**: success - | **type**: str - -missing - Any files or data sets that were missing from the source. - - | **returned**: success - | **type**: list - -archived - Any files or data sets that were compressed or added to the archive. - - | **returned**: success - | **type**: list - -arcroot - If ``src`` is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty. - - | **returned**: always - | **type**: str - -expanded_sources - The list of matching paths from the src option. - - | **returned**: always - | **type**: list - -expanded_exclude_sources - The list of matching exclude paths from the exclude option. - - | **returned**: always - | **type**: list - -encoded - List of files or data sets that were successfully encoded. - - | **returned**: success - | **type**: list - -failed_on_encoding - List of files or data sets that were failed while encoding. - - | **returned**: success - | **type**: list - -skipped_encoding_targets - List of files or data sets that were skipped while encoding. - - | **returned**: success - | **type**: list - diff --git a/docs/source/modules/zos_backup_restore.rst b/docs/source/modules/zos_backup_restore.rst deleted file mode 100644 index f335ca632c..0000000000 --- a/docs/source/modules/zos_backup_restore.rst +++ /dev/null @@ -1,443 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_backup_restore.py - -.. _zos_backup_restore_module: - - -zos_backup_restore -- Backup and restore data sets and volumes -============================================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Create and restore from backups of data sets and volumes. -- Data set backups are performed using logical dumps, volume backups are performed using physical dumps. -- Backups are compressed using AMATERSE. -- Backups are created by first dumping data sets with ADRDSSU, followed by compression with AMATERSE. -- Restoration is performed by first decompressing an archive with AMATERSE, then restoring with ADRDSSU. -- Since ADRDSSU and AMATERSE are used to create and restore backups, backups can be restored to systems where Ansible and ZOAU are not available. Conversely, dumps created with ADRDSSU and AMATERSE can be restored using this module. - - - - - -Parameters ----------- - - -operation - Used to specify the operation to perform. - - | **required**: True - | **type**: str - | **choices**: backup, restore - - -data_sets - Determines which data sets to include in the backup. - - | **required**: False - | **type**: dict - - - include - When *operation=backup*, specifies a list of data sets or data set patterns to include in the backup. - - When *operation=backup* GDS relative names are supported. - - When *operation=restore*, specifies a list of data sets or data set patterns to include when restoring from a backup. - - The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate to DFSMSdss that only part of a qualifier has been specified. - - When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. - - Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. - - A question mark ``?`` or percent sign ``%`` matches a single character. 
- - | **required**: False - | **type**: raw - - - exclude - When *operation=backup*, specifies a list of data sets or data set patterns to exclude from the backup. - - When *operation=backup* GDS relative names are supported. - - When *operation=restore*, specifies a list of data sets or data set patterns to exclude when restoring from a backup. - - The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate that only part of a qualifier has been specified." - - When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. - - Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. - - A question mark ``?`` or percent sign ``%`` matches a single character. - - | **required**: False - | **type**: raw - - - -volume - This applies to both data set restores and volume restores. - - When *operation=backup* and *data_sets* are provided, specifies the volume that contains the data sets to backup. - - When *operation=restore*, specifies the volume the backup should be restored to. - - *volume* is required when restoring a full volume backup. - - | **required**: False - | **type**: str - - -full_volume - When *operation=backup* and *full_volume=True*, specifies that the entire volume provided to *volume* should be backed up. - - When *operation=restore* and *full_volume=True*, specifies that the volume should be restored (default is dataset). - - *volume* must be provided when *full_volume=True*. - - | **required**: False - | **type**: bool - | **default**: False - - -temp_volume - Specifies a particular volume on which the temporary data sets should be created during the backup and restore process. - - When *operation=backup* and *backup_name* is a data set, specifies the volume the backup should be placed in. - - | **required**: False - | **type**: str - - -backup_name - When *operation=backup*, the destination data set or UNIX file to hold the backup. - - When *operation=restore*, the destination data set or UNIX file backup to restore. - - There are no enforced conventions for backup names. However, using a common extension like ``.dzp`` for UNIX files and ``.DZP`` for data sets will improve readability. - - GDS relative names are supported when *operation=restore*. - - | **required**: True - | **type**: str - - -recover - When *recover=true* and *operation=backup* then potentially recoverable errors will be ignored. - - | **required**: False - | **type**: bool - | **default**: False - - -overwrite - When *operation=backup*, specifies if an existing data set or UNIX file matching *backup_name* should be deleted. - - When *operation=restore*, specifies if the module should overwrite existing data sets with matching name on the target device. - - | **required**: False - | **type**: bool - | **default**: False - - -compress - When *operation=backup*, enables compression of partitioned data sets using system-level compression features. If supported, this may utilize zEDC hardware compression. - - This option can reduce the size of the temporary dataset generated during backup operations either before the AMATERSE step when *terse* is True or the resulting backup when *terse* is False. 
- - | **required**: False - | **type**: bool - | **default**: False - - -terse - When *operation=backup*, executes an AMATERSE step to compress and pack the temporary data set for the backup. This creates a backup with a format suitable for transferring off-platform. - - If *operation=backup* and if *dataset=False* then option *terse* must be True. - - | **required**: False - | **type**: bool - | **default**: True - - -sms_storage_class - When *operation=restore*, specifies the storage class to use. The storage class will also be used for temporary data sets created during restore process. - - When *operation=backup*, specifies the storage class to use for temporary data sets created during backup process. - - If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. - - | **required**: False - | **type**: str - - -sms_management_class - When *operation=restore*, specifies the management class to use. The management class will also be used for temporary data sets created during restore process. - - When *operation=backup*, specifies the management class to use for temporary data sets created during backup process. - - If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. - - | **required**: False - | **type**: str - - -space - If *operation=backup*, specifies the amount of space to allocate for the backup. Please note that even when backing up to a UNIX file, backup contents will be temporarily held in a data set. - - If *operation=restore*, specifies the amount of space to allocate for data sets temporarily created during the restore process. - - The unit of space used is set using *space_type*. - - When *full_volume=True*, *space* defaults to ``1``, otherwise default is ``25`` - - | **required**: False - | **type**: int - - -space_type - The unit of measurement to use when defining data set space. - - Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. - - When *full_volume=True*, *space_type* defaults to ``g``, otherwise default is ``m`` - - | **required**: False - | **type**: str - | **default**: m - | **choices**: k, m, g, cyl, trk - - -hlq - Specifies the new HLQ to use for the data sets being restored. - - If no value is provided, the data sets will be restored with their original HLQs. - - | **required**: False - | **type**: str - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary data sets used in the module's operation. - - If *tmp_hlq* is set, this value will be applied to all temporary data sets. - - If *tmp_hlq* is not set, the value will be the username who submits the ansible task, this is the default behavior. If the username can not be identified, the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. 
code-block:: yaml+jinja - - - - name: Backup all data sets matching the pattern USER.** to data set MY.BACKUP.DZP - zos_backup_restore: - operation: backup - data_sets: - include: user.** - backup_name: MY.BACKUP.DZP - - - name: Backup all data sets matching the patterns USER.** or PRIVATE.TEST.* - excluding data sets matching the pattern USER.PRIVATE.* to data set MY.BACKUP.DZP - zos_backup_restore: - operation: backup - data_sets: - include: - - user.** - - private.test.* - exclude: user.private.* - backup_name: MY.BACKUP.DZP - - - name: Backup a list of GDDs to data set my.backup.dzp - zos_backup_restore: - operation: backup - data_sets: - include: - - user.gdg(-1) - - user.gdg(0) - backup_name: my.backup.dzp - - - name: Backup datasets using compress - zos_backup_restore: - operation: backup - compress: true - terse: true - data_sets: - include: someds.name.here - backup_name: my.backup.dzp - - - name: Backup all datasets matching the pattern USER.** to UNIX file /tmp/temp_backup.dzp, ignore recoverable errors. - zos_backup_restore: - operation: backup - data_sets: - include: user.** - backup_name: /tmp/temp_backup.dzp - recover: true - - - name: Backup all datasets matching the pattern USER.** to data set MY.BACKUP.DZP, - allocate 100MB for data sets used in backup process. - zos_backup_restore: - operation: backup - data_sets: - include: user.** - backup_name: MY.BACKUP.DZP - space: 100 - space_type: m - - - name: - Backup all datasets matching the pattern USER.** that are present on the volume MYVOL1 to data set MY.BACKUP.DZP, - allocate 100MB for data sets used in the backup process. - zos_backup_restore: - operation: backup - data_sets: - include: user.** - volume: MYVOL1 - backup_name: MY.BACKUP.DZP - space: 100 - space_type: m - - - name: Backup an entire volume, MYVOL1, to the UNIX file /tmp/temp_backup.dzp, - allocate 1GB for data sets used in backup process. - zos_backup_restore: - operation: backup - backup_name: /tmp/temp_backup.dzp - volume: MYVOL1 - full_volume: true - space: 1 - space_type: g - - - name: Restore data sets from a backup stored in the UNIX file /tmp/temp_backup.dzp. - Restore the data sets with the original high level qualifiers. - zos_backup_restore: - operation: restore - backup_name: /tmp/temp_backup.dzp - - - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. - Only restore data sets whose last, or only qualifier is TEST. - Use MYHLQ as the new HLQ for restored data sets. - zos_backup_restore: - operation: restore - data_sets: - include: "**.TEST" - backup_name: /tmp/temp_backup.dzp - hlq: MYHLQ - - - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. - Only restore data sets whose last, or only qualifier is TEST. - Use MYHLQ as the new HLQ for restored data sets. Restore data sets to volume MYVOL2. - zos_backup_restore: - operation: restore - data_sets: - include: "**.TEST" - volume: MYVOL2 - backup_name: /tmp/temp_backup.dzp - hlq: MYHLQ - - - name: Restore data sets from backup stored in the data set MY.BACKUP.DZP. - Use MYHLQ as the new HLQ for restored data sets. - zos_backup_restore: - operation: restore - backup_name: MY.BACKUP.DZP - hlq: MYHLQ - - - name: Restore volume from backup stored in the data set MY.BACKUP.DZP. - Restore to volume MYVOL2. - zos_backup_restore: - operation: restore - volume: MYVOL2 - full_volume: true - backup_name: MY.BACKUP.DZP - space: 1 - space_type: g - - - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. 
- Specify DB2SMS10 for the SMS storage and management classes to use for the restored - data sets. - zos_backup_restore: - operation: restore - volume: MYVOL2 - backup_name: /tmp/temp_backup.dzp - sms_storage_class: DB2SMS10 - sms_management_class: DB2SMS10 - - - - -Notes ------ - -.. note:: - It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF FACILITY resource class. A user is described as the remote user, configured to run either the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - - When using this module, if the RACF FACILITY class profile **STGADMIN.ADR.DUMP.TOLERATE.ENQF** is active, you must have READ access authority to use the module option *recover=true*. If the RACF FACILITY class checking is not set up, any user can use the module option without access to the class. - - If your system uses a different security product, consult that product's documentation to configure the required security classes. - - - - - - - -Return Values -------------- - - -changed - Indicates if the operation made changes. - - ``true`` when backup/restore was successful, ``false`` otherwise. - - | **returned**: always - | **type**: bool - | **sample**: - - .. code-block:: json - - true - -backup_name - The USS file name or data set name that was used as a backup. - - Matches the *backup_name* parameter provided as input. - - | **returned**: always - | **type**: str - | **sample**: /u/oeusr03/my_backup.dzp - -message - Returns any important messages about the modules execution, if any. - - | **returned**: always - | **type**: str - diff --git a/docs/source/modules/zos_blockinfile.rst b/docs/source/modules/zos_blockinfile.rst deleted file mode 100644 index 041182ca10..0000000000 --- a/docs/source/modules/zos_blockinfile.rst +++ /dev/null @@ -1,412 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_blockinfile.py - -.. _zos_blockinfile_module: - - -zos_blockinfile -- Manage block of multi-line textual data on z/OS -================================================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Manage block of multi-lines in z/OS UNIX System Services (USS) files, PS (sequential data set), PDS, PDSE, or member of a PDS or PDSE. -- This module ensures a particular block of multi-line text surrounded by customizable marker lines is present in a USS file or data set, or replaces an existing block identified by the markers. -- This is primarily useful when you want to change a block of multi-line text in a USS file or data set. - - - - - -Parameters ----------- - - -src - The location can be a UNIX System Services (USS) file, PS (sequential data set), member of a PDS or PDSE, PDS, PDSE. - - The USS file must be an absolute pathname. - - Generation data set (GDS) relative name of generation already created. e.g. *SOME.CREATION(-1*). - - | **required**: True - | **type**: str - - -state - Whether the block should be inserted or replaced using *state=present*. - - Whether the block should be removed using *state=absent*. - - | **required**: False - | **type**: str - | **default**: present - | **choices**: absent, present - - -marker - The marker line template. - - ``{mark}`` will be replaced with the values ``in marker_begin`` (default="BEGIN") and ``marker_end`` (default="END"). 
- - Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. - - | **required**: False - | **type**: str - | **default**: # {mark} ANSIBLE MANAGED BLOCK - - -block - The text to insert inside the marker lines. - - Multi-line can be separated by '\n'. - - Any double-quotation marks will be removed. - - | **required**: False - | **type**: str - - -insertafter - If specified, the block will be inserted after the last match of the specified regular expression. - - A special value ``EOF`` for inserting a block at the end of the file is available. - - If a specified regular expression has no matches, ``EOF`` will be used instead. - - Choices are EOF or '*regex*'. - - Default is EOF. - - | **required**: False - | **type**: str - - -insertbefore - If specified, the block will be inserted before the last match of specified regular expression. - - A special value ``BOF`` for inserting the block at the beginning of the file is available. - - If a specified regular expression has no matches, the block will be inserted at the end of the file. - - Choices are BOF or '*regex*'. - - | **required**: False - | **type**: str - - -marker_begin - This will be inserted at ``{mark}`` in the opening ansible block marker. - - Value needs to be different from *marker_end*. - - | **required**: False - | **type**: str - | **default**: BEGIN - - -marker_end - This will be inserted at ``{mark}`` in the closing ansible block marker. - - Value must be different from *marker_begin*. - - | **required**: False - | **type**: str - | **default**: END - - -backup - Specifies whether a backup of destination should be created before editing the source *src*. - - When set to ``true``, the module creates a backup file or data set. - - The backup file name will be returned on either success or failure of module execution such that data can be retrieved. - - Use generation data set (GDS) relative positive name. e.g. *SOME.CREATION(+1*). - - | **required**: False - | **type**: bool - | **default**: False - - -backup_name - Specify the USS file name or data set name for the destination backup. - - If the source *src* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name. - - If the source is an MVS data set, the backup_name name must be an MVS data set name, and the dataset must not be preallocated. - - If the backup_name is not provided, the default backup_name name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. - - If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. - - If *src* is a data set member and backup_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. - - | **required**: False - | **type**: str - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - -encoding - The character set of the source *src*. 
`zos_blockinfile <./zos_blockinfile.html>`_ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047.
-
-  Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported.
-
-  | **required**: False
-  | **type**: str
-  | **default**: IBM-1047
-
-
-force
-  Specifies that the data set can be shared with others during an update which results in the data set you are updating to be simultaneously updated by others.
-
-  This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read.
-
-  The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-indentation
-  Defines the number of spaces needed to prepend in every line of the block.
-
-  | **required**: False
-  | **type**: int
-  | **default**: 0
-
-
-
-
-Attributes
-----------
-action
-  | **support**: none
-  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
-async
-  | **support**: full
-  | **description**: Supports being used with the ``async`` keyword.
-check_mode
-  | **support**: none
-  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Insert/Update new mount point
-     zos_blockinfile:
-       src: SYS1.PARMLIB(BPXPRM00)
-       marker: "/* {mark} ANSIBLE MANAGED BLOCK */"
-       block: |
-         MOUNT FILESYSTEM('SOME.DATA.SET') TYPE(ZFS) MODE(READ)
-         MOUNTPOINT('/tmp/src/somedirectory')
-   - name: Remove a library as well as surrounding markers
-     zos_blockinfile:
-       state: absent
-       src: SYS1.PARMLIB(PROG00)
-       marker: "/* {mark} ANSIBLE MANAGED BLOCK FOR SOME.DATA.SET */"
-   - name: Add ZOAU path to PATH in /etc/profile
-     zos_blockinfile:
-       src: /etc/profile
-       insertafter: "PATH="
-       block: |
-         ZOAU=/path/to/zoau_dir/bin
-         export ZOAU
-         PATH=$ZOAU:$PATH
-   - name: Insert/Update HTML surrounded by custom markers after <body> line
-     zos_blockinfile:
-       path: /var/www/html/index.html
-       marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
-       insertafter: "<body>"
-       block: |
-         <h1>Welcome to {{ ansible_hostname }}</h1>
-         <p>Last updated on {{ ansible_date_time.iso8601 }}</p>
-   - name: Remove HTML as well as surrounding markers
-     zos_blockinfile:
-       path: /var/www/html/index.html
-       state: absent
-       marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
-   - name: Add mappings to /etc/hosts
-     zos_blockinfile:
-       path: /etc/hosts
-       block: |
-         {{ item.ip }} {{ item.name }}
-       marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
-     loop:
-       - { name: host1, ip: 10.10.1.10 }
-       - { name: host2, ip: 10.10.1.11 }
-       - { name: host3, ip: 10.10.1.12 }
-   - name: Add a code block to a member using a predefined indentation.
-     zos_blockinfile:
-       path: SYS1.PARMLIB(BPXPRM00)
-       block: |
-         DSN SYSTEM({{ DB2SSID }})
-         RUN PROGRAM(DSNTEP2) PLAN(DSNTEP12) -
-         LIB('{{ DB2RUN }}.RUNLIB.LOAD')
-       indentation: 16
-
-   - name: Update a script with commands containing quotes.
-     zos_blockinfile:
-       src: "/u/scripts/script.sh"
-       insertafter: "EOF"
-       block: |
-         cat "//'{{ DS_NAME }}'"
-         cat "//'{{ DS_NAME_2 }}'"
-
-   - name: Set facts for the following two tasks.
-     set_fact:
-       HLQ: 'ANSIBLE'
-       MLQ: 'MEMBER'
-       LLQ: 'TEST'
-       MEM: '(JCL)'
-       MSG: 'your first JCL program'
-       CONTENT: "{{ lookup('file', 'files/content.txt') }}"
-
-   - name: Update JCL in a PDS member with Jinja2 variable syntax.
-     zos_blockinfile:
-       src: "{{ HLQ }}.{{MLQ}}.{{LLQ}}{{MEM}}"
-       insertafter: "HELLO, WORLD"
-       marker: "//* {mark} *//"
-       marker_begin: "Begin Ansible Block Insertion 1"
-       marker_end: "End Ansible Block Insertion 1"
-       state: present
-       block: |
-         This is {{ MSG }}, and its now
-         managed by Ansible.
-
-   - name: Update JCL in PDS member with content from a file.
-     zos_blockinfile:
-       src: "{{ HLQ }}.{{MLQ}}.{{LLQ}}{{MEM}}"
-       insertafter: "End Ansible Block Insertion 1"
-       marker: "//* {mark} *//"
-       marker_begin: "Begin Ansible Block Insertion 2"
-       marker_end: "End Ansible Block Insertion 2"
-       block: "{{ CONTENT }}"
-
-   - name: Add a block to a gds
-     zos_blockinfile:
-       src: TEST.SOME.CREATION(0)
-       insertafter: EOF
-       block: "{{ CONTENT }}"
-
-   - name: Add a block to dataset and backup in a new generation of gds
-     zos_blockinfile:
-       src: SOME.CREATION.TEST
-       insertbefore: BOF
-       backup: true
-       backup_name: CREATION.GDS(+1)
-       block: "{{ CONTENT }}"
-
-
-
-
-Notes
------
-
-.. note::
-   It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user.
-
-   All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets.
-
-   For supported character sets used to encode data, refer to the `documentation `_.
-
-   When using ``with_*`` loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
-
-   When more then one block should be handled in a file you must change the *marker* per task.
-
-   When working with a backup of a sequential dataset, the backup name should also be a sequential dataset. This will avoid the false positive and error condition during backup.
-
-
-
-See Also
---------
-
-.. seealso::
-
-   - :ref:`zos_data_set_module`
-
-
-
-
-Return Values
--------------
-
-
-changed
-  Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`.
-
-  | **returned**: success
-  | **type**: bool
-  | **sample**:
-
-    ..
code-block:: json - - 1 - -found - Number of the matching patterns - - | **returned**: success - | **type**: int - | **sample**: 5 - -cmd - Constructed ZOAU dmod shell command based on the parameters - - | **returned**: success - | **type**: str - | **sample**: dmod -d -b -c IBM-1047 -m "BEGIN\nEND\n# {mark} ANSIBLE MANAGED BLOCK" -e "$ a\\PATH=/dir/bin:$PATH" /etc/profile - -msg - The module messages - - | **returned**: failure - | **type**: str - | **sample**: Parameter verification failed - -stdout - The stdout from ZOAU dmod when json.loads() fails to parse the result from dmod - - | **returned**: failure - | **type**: str - -stderr - The error messages from ZOAU dmod - - | **returned**: failure - | **type**: str - | **sample**: BGYSC1311E Iconv error, cannot open converter from ISO-88955-1 to IBM-1047 - -rc - The return code from ZOAU dmod when json.loads() fails to parse the result from dmod - - | **returned**: failure - | **type**: bool - -backup_name - Name of the backup file or data set that was created. - - | **returned**: if backup=true, always - | **type**: str - | **sample**: /path/to/file.txt.2015-02-03@04:15~ - diff --git a/docs/source/modules/zos_copy.rst b/docs/source/modules/zos_copy.rst deleted file mode 100644 index 5fe5e565f5..0000000000 --- a/docs/source/modules/zos_copy.rst +++ /dev/null @@ -1,1187 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_copy.py - -.. _zos_copy_module: - - -zos_copy -- Copy data to z/OS -============================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- The `zos_copy <./zos_copy.html>`_ module copies a file or data set from a local or a remote machine to a location on the remote machine. - - - - - -Parameters ----------- - - -asa_text - If set to ``true``, indicates that either ``src`` or ``dest`` or both contain ASA control characters. - - When ``src`` is a USS file and ``dest`` is a data set, the copy will preserve ASA control characters in the destination. - - When ``src`` is a data set containing ASA control characters and ``dest`` is a USS file, the copy will put all control characters as plain text in the destination. - - If ``dest`` is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA). - - If neither ``src`` or ``dest`` have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail. - - This option is only valid for text files. If ``is_binary`` is ``true`` or ``executable`` is ``true`` as well, the module will fail. - - | **required**: False - | **type**: bool - | **default**: False - - -identical_gdg_copy - If set to ``true``, and the destination GDG does not exist, the module will copy the source GDG to the destination GDG with identical GDS absolute names. - - If set to ``false``, the copy will be done as a normal copy, without preserving the source GDG absolute names. - - | **required**: False - | **type**: bool - | **default**: False - - -backup - Specifies whether a backup of the destination should be created before copying data. - - When set to ``true``, the module creates a backup file or data set. - - The backup file name will be returned on either success or failure of module execution such that data can be retrieved. - - | **required**: False - | **type**: bool - | **default**: False - - -backup_name - Specify a unique USS file name or data set name for the destination backup. 
- - If the destination ``dest`` is a USS file or path, the ``backup_name`` must be an absolute path name. - - If the destination is an MVS data set name, the ``backup_name`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. - - If the ``backup_name`` is not provided, the default ``backup_name`` will be used. If the ``dest`` is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the ``dest`` is an MVS data set, it will be a data set with a randomly generated name. - - If ``dest`` is a data set member and ``backup_name`` is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. - - If *backup_name* is a generation data set (GDS), it must be a relative positive name (for example, V(HLQ.USER.GDG(+1\))). - - | **required**: False - | **type**: str - - -content - When used instead of ``src``, sets the contents of a file or data set directly to the specified value. - - Works only when ``dest`` is a USS file, sequential data set, or a partitioned data set member. - - If ``dest`` is a directory, then content will be copied to ``/path/to/dest/inline_copy``. - - | **required**: False - | **type**: str - - -dest - The remote absolute path or data set where the content should be copied to. - - ``dest`` can be a USS file, directory or MVS data set name. - - ``dest`` can be a alias name of a PS, PDS or PDSE data set. - - If ``dest`` has missing parent directories, they will be created. - - If ``dest`` is a nonexistent USS file, it will be created. - - If ``dest`` is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the module will fail. - - If ``dest`` is a nonexistent data set, it will be created following the process outlined here and in the ``volume`` option. - - If ``dest`` is a nonexistent data set, the attributes assigned will depend on the type of ``src``. If ``src`` is a USS file, ``dest`` will have a Fixed Block (FB) record format and the remaining attributes will be computed. If *is_binary=true*, ``dest`` will have a Fixed Block (FB) record format with a record length of 80, block size of 32720, and the remaining attributes will be computed. If *executable=true*,``dest`` will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. - - If ``src`` is a file and ``dest`` a partitioned data set, ``dest`` does not need to include a member in its value, the module can automatically compute the resulting member name from ``src``. - - When ``dest`` is a data set, precedence rules apply. If ``dest_data_set`` is set, this will take precedence over an existing data set. If ``dest`` is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precendent rule has been exercised, ``dest`` will be created with the same attributes of ``src``. - - When the ``dest`` is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. 
The VSAM (KSDS) or VSAM (ESDS) ``dest`` will be deleted and recreated following the process outlined in the ``volume`` option. - - When the ``dest`` is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the ``volume`` option. - - When ``dest`` is and existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the ``volume`` option. - - ``dest`` can be a previously allocated generation data set (GDS) or a new GDS. - - When ``dest`` is a generation data group (GDG), ``src`` must be a GDG too. The copy will allocate successive new generations in ``dest``, the module will verify it has enough available generations before starting the copy operations. - - When ``dest`` is a data set, you can override storage management rules by specifying ``volume`` if the storage class being used has GUARANTEED_SPACE=YES specified, otherwise, the allocation will fail. See ``volume`` for more volume related processes. - - | **required**: True - | **type**: str - - -encoding - Specifies which encodings the destination file or data set should be converted from and to. - - If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data. - - Only valid if ``is_binary`` is false. - - | **required**: False - | **type**: dict - - - from - The encoding to be converted from - - | **required**: True - | **type**: str - - - to - The encoding to be converted to - - | **required**: False - | **type**: str - - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - -force - If set to ``true`` and the remote file or data set ``dest`` is empty, the ``dest`` will be reused. - - If set to ``true`` and the remote file or data set ``dest`` is NOT empty, the ``dest`` will be deleted and recreated with the ``src`` data set attributes, otherwise it will be recreated with the ``dest`` data set attributes. - - To backup data before any deletion, see parameters ``backup`` and ``backup_name``. - - If set to ``false``, the file or data set will only be copied if the destination does not exist. - - If set to ``false`` and destination exists, the module exits with a note to the user. - - | **required**: False - | **type**: bool - | **default**: False - - -force_lock - By default, when ``dest`` is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use ``force_lock`` to bypass DISP=SHR and continue with the copy operation. - - If set to ``true`` and destination is a MVS data set opened by another process then zos_copy will try to copy using DISP=SHR. - - Using ``force_lock`` uses operations that are subject to race conditions and can lead to data loss, use with caution. - - If a data set member has aliases, and is not a program object, copying that member to a dataset that is in use will result in the aliases not being preserved in the target dataset. When this scenario occurs the module will fail. - - | **required**: False - | **type**: bool - | **default**: False - - -ignore_sftp_stderr - During data transfer through SFTP, the SFTP command directs content to stderr. 
By default, the module essentially ignores the stderr stream produced by SFTP and continues execution. The user is able to override this behavior by setting this parameter to ``false``. By doing so, any content written to stderr is considered an error by Ansible and will cause the module to fail. - - When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. - - | **required**: False - | **type**: bool - | **default**: True - - -is_binary - If set to ``true``, indicates that the file or data set to be copied is a binary file or data set. - - When *is_binary=true*, no encoding conversion is applied to the content, all content transferred retains the original state. - - Use *is_binary=true* when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program. - - | **required**: False - | **type**: bool - | **default**: False - - -executable - If set to ``true``, indicates that the file or library to be copied is an executable. - - If *executable=true*, and ``dest`` is a data set, it must be a PDS or PDSE (library). - - If ``dest`` is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed. - - If ``dest`` is a file, execute permission for the user will be added to the file (``u+x``). - - If the ``src`` executable has an alias, the alias will not be copied unless ``aliases=true``. - - | **required**: False - | **type**: bool - | **default**: False - - -aliases - If set to ``true``, indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation. - - Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when ``executable=True`` and ``dest`` is a USS file or directory, this option will be ignored. - - Copying of aliases for text-based data sets from USS sources or to USS destinations is not currently supported. - - If the ``dest`` is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library. - - | **required**: False - | **type**: bool - | **default**: False - - -local_follow - This flag indicates that any existing filesystem links in the source tree should be followed. - - | **required**: False - | **type**: bool - | **default**: True - - -group - Name of the group that will own the file system objects. - - When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. - - This option is only applicable if ``dest`` is USS, otherwise ignored. - - | **required**: False - | **type**: str - - -mode - The permission of the destination file or directory. - - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. - - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. 
- - The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. - - *mode=preserve* means that the file will be given the same permissions as the source file. - - | **required**: False - | **type**: str - - -owner - Name of the user that should own the filesystem object, as would be passed to the chown command. - - When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - - This option is only applicable if ``dest`` is USS, otherwise ignored. - - | **required**: False - | **type**: str - - -remote_src - If set to ``false``, the module searches for ``src`` at the local machine. - - If set to ``true``, the module goes to the remote/target machine for ``src``. - - | **required**: False - | **type**: bool - | **default**: False - - -src - Path to a file/directory or name of a data set to copy to remote z/OS system. - - ``src`` can be a alias name of a PS, PDS or PDSE data set. - - If ``remote_src`` is true, then ``src`` must be the path to a Unix System Services (USS) file, name of a data set, or data set member. - - If ``src`` is a local path or a USS path, it can be absolute or relative. - - If ``src`` is a directory, ``dest`` must be a partitioned data set or a USS directory. - - If ``src`` is a file and ``dest`` ends with "/" or is a directory, the file is copied to the directory with the same filename as ``src``. - - If ``src`` is a directory and ends with "/", the contents of it will be copied into the root of ``dest``. If it doesn't end with "/", the directory itself will be copied. - - If ``src`` is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. - - If ``src`` is a VSAM data set, ``dest`` must also be a VSAM. - - If ``src`` is a generation data set (GDS), it must be a previously allocated one. - - If ``src`` is a generation data group (GDG), ``dest`` can be another GDG or a USS directory. - - Wildcards can be used to copy multiple PDS/PDSE members to another PDS/PDSE. i.e. Using SOME.TEST.PDS(*) will copy all members from one PDS/E to another without removing the destination PDS/E. - - Required unless using ``content``. - - | **required**: False - | **type**: str - - -validate - Specifies whether to perform checksum validation for source and destination files. - - Valid only for USS destination, otherwise ignored. - - | **required**: False - | **type**: bool - | **default**: False - - -volume - If ``dest`` does not exist, specify which volume ``dest`` should be allocated to. - - Only valid when the destination is an MVS data set. - - The volume must already be present on the device. - - If no volume is specified, storage management rules will be used to determine the volume where ``dest`` will be allocated. - - If the storage administrator has specified a system default unit name and you do not set a ``volume`` name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. - - | **required**: False - | **type**: str - - -dest_data_set - Data set attributes to customize a ``dest`` data set to be copied into. - - Some attributes only apply when ``dest`` is a generation data group (GDG). 
- - | **required**: False - | **type**: dict - - - type - Organization of the destination - - | **required**: True - | **type**: str - | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, member, basic, large, library, gdg - - - space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - - - space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - - - space_type - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. - - | **required**: False - | **type**: str - | **choices**: k, m, g, cyl, trk - - - record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``fb``) - - Choices are case-sensitive. - - | **required**: False - | **type**: str - | **choices**: fb, vb, fba, vba, u - - - record_length - The length of each record in the data set, in bytes. - - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - - | **required**: False - | **type**: int - - - block_size - The block size to use for the data set. - - | **required**: False - | **type**: int - - - directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - - key_offset - The key offset to use when creating a KSDS data set. - - *key_offset* is required when *type=ksds*. - - *key_offset* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - - key_length - The key length to use when creating a KSDS data set. - - *key_length* is required when *type=ksds*. - - *key_length* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - - sms_storage_class - The storage class for an SMS-managed dataset. - - Required for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_data_class - The data class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_management_class - The management class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - limit - Sets the *limit* attribute for a GDG. - - Specifies the maximum number, from 1 to 255(up to 999 if extended), of generations that can be associated with the GDG being defined. - - *limit* is required when *type=gdg*. - - | **required**: False - | **type**: int - - - empty - Sets the *empty* attribute for a GDG. - - If false, removes only the oldest GDS entry when a new GDS is created that causes GDG limit to be exceeded. 
- - If true, removes all GDS entries from a GDG base when a new GDS is created that causes the GDG limit to be exceeded. - - | **required**: False - | **type**: bool - - - scratch - Sets the *scratch* attribute for a GDG. - - Specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing. - - | **required**: False - | **type**: bool - - - purge - Sets the *purge* attribute for a GDG. - - Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. - - | **required**: False - | **type**: bool - - - extended - Sets the *extended* attribute for a GDG. - - If false, allow up to 255 generation data sets (GDSs) to be associated with the GDG. - - If true, allow up to 999 generation data sets (GDS) to be associated with the GDG. - - | **required**: False - | **type**: bool - - - fifo - Sets the *fifo* attribute for a GDG. - - If false, the order is the newest GDS defined to the oldest GDS. This is the default value. - - If true, the order is the oldest GDS defined to the newest GDS. - - | **required**: False - | **type**: bool - - - -use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. - - Only valid when ``src`` is a local file or directory. - - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. - - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ - - | **required**: False - | **type**: bool - | **default**: False - - -template_parameters - Options to set the way Jinja2 will process templates. - - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. - - These options are ignored unless ``use_template`` is true. - - | **required**: False - | **type**: dict - - - variable_start_string - Marker for the beginning of a statement to print a variable in Jinja2. - - | **required**: False - | **type**: str - | **default**: {{ - - - variable_end_string - Marker for the end of a statement to print a variable in Jinja2. - - | **required**: False - | **type**: str - | **default**: }} - - - block_start_string - Marker for the beginning of a block in Jinja2. - - | **required**: False - | **type**: str - | **default**: {% - - - block_end_string - Marker for the end of a block in Jinja2. - - | **required**: False - | **type**: str - | **default**: %} - - - comment_start_string - Marker for the beginning of a comment in Jinja2. - - | **required**: False - | **type**: str - | **default**: {# - - - comment_end_string - Marker for the end of a comment in Jinja2. - - | **required**: False - | **type**: str - | **default**: #} - - - line_statement_prefix - Prefix used by Jinja2 to identify line-based statements. - - | **required**: False - | **type**: str - - - line_comment_prefix - Prefix used by Jinja2 to identify comment lines. - - | **required**: False - | **type**: str - - - lstrip_blocks - Whether Jinja2 should strip leading spaces from the start of a line to a block. 
- - | **required**: False - | **type**: bool - | **default**: False - - - trim_blocks - Whether Jinja2 should remove the first newline after a block is removed. - - Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. - - | **required**: False - | **type**: bool - | **default**: True - - - keep_trailing_newline - Whether Jinja2 should keep the first trailing newline at the end of a template after rendering. - - | **required**: False - | **type**: bool - | **default**: False - - - newline_sequence - Sequence that starts a newline in a template. - - | **required**: False - | **type**: str - | **default**: \\n - | **choices**: \\n, \\r, \\r\\n - - - auto_reload - Whether to reload a template file when it has changed after the task has started. - - | **required**: False - | **type**: bool - | **default**: False - - - autoescape - Whether to enable autoescape of XML/HTML elements on a template. - - | **required**: False - | **type**: bool - | **default**: True - - - - - -Attributes ----------- -action - | **support**: full - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Copy a local file to a sequential data set - zos_copy: - src: /path/to/sample_seq_data_set - dest: SAMPLE.SEQ.DATA.SET - - - name: Copy a local file to a USS location and validate checksum - zos_copy: - src: /path/to/test.log - dest: /tmp/test.log - validate: true - - - name: Copy a local ASCII encoded file and convert to IBM-1047 - zos_copy: - src: /path/to/file.txt - dest: /tmp/file.txt - - - name: Copy a local directory to a PDSE - zos_copy: - src: /path/to/local/dir/ - dest: HLQ.DEST.PDSE - - - name: Copy file with permission details - zos_copy: - src: /path/to/foo.conf - dest: /etc/foo.conf - mode: "0644" - group: foo - owner: bar - - - name: Module will follow the symbolic link specified in src - zos_copy: - src: /path/to/link - dest: /path/to/uss/location - local_follow: true - - - name: Copy a local file to a PDS member and convert encoding - zos_copy: - src: /path/to/local/file - dest: HLQ.SAMPLE.PDSE(MEMBER) - encoding: - from: UTF-8 - to: IBM-037 - - - name: Copy a VSAM (KSDS) to a VSAM (KSDS) - zos_copy: - src: SAMPLE.SRC.VSAM - dest: SAMPLE.DEST.VSAM - remote_src: true - - - name: Copy inline content to a sequential dataset and replace existing data - zos_copy: - content: 'Inline content to be copied' - dest: SAMPLE.SEQ.DATA.SET - - - name: Copy a USS file to sequential data set and convert encoding beforehand - zos_copy: - src: /path/to/remote/uss/file - dest: SAMPLE.SEQ.DATA.SET - remote_src: true - - - name: Copy a USS directory to another USS directory - zos_copy: - src: /path/to/uss/dir - dest: /path/to/dest/dir - remote_src: true - - - name: Copy a local binary file to a PDSE member - zos_copy: - src: /path/to/binary/file - dest: HLQ.SAMPLE.PDSE(MEMBER) - is_binary: true - - - name: Copy a sequential data set to a PDS member - zos_copy: - src: SAMPLE.SEQ.DATA.SET - dest: HLQ.SAMPLE.PDSE(MEMBER) - remote_src: true - - - name: Copy a 
local file and take a backup of the existing file - zos_copy: - src: /path/to/local/file - dest: /path/to/dest - backup: true - backup_name: /tmp/local_file_backup - - - name: Copy a PDS on remote system to a new PDS - zos_copy: - src: HLQ.SRC.PDS - dest: HLQ.NEW.PDS - remote_src: true - - - name: Copy a PDS on remote system to a PDS, replacing the original - zos_copy: - src: HLQ.SAMPLE.PDSE - dest: HLQ.EXISTING.PDSE - remote_src: true - force: true - - - name: Copy PDS member to a new PDS member. Replace if it already exists - zos_copy: - src: HLQ.SAMPLE.PDSE(SRCMEM) - dest: HLQ.NEW.PDSE(DESTMEM) - remote_src: true - force: true - - - name: Copy a USS file to a PDSE member. If PDSE does not exist, allocate it - zos_copy: - src: /path/to/uss/src - dest: DEST.PDSE.DATA.SET(MEMBER) - remote_src: true - - - name: Copy a sequential data set to a USS file - zos_copy: - src: SRC.SEQ.DATA.SET - dest: /tmp/ - remote_src: true - - - name: Copy a PDSE member to USS file - zos_copy: - src: SRC.PDSE(MEMBER) - dest: /tmp/member - remote_src: true - - - name: Copy a PDS to a USS directory (/tmp/SRC.PDS) - zos_copy: - src: SRC.PDS - dest: /tmp - remote_src: true - - - name: Copy all members inside a PDS to another PDS - zos_copy: - src: SOME.SRC.PDS(*) - dest: SOME.DEST.PDS - remote_src: true - - - name: Copy all members starting with 'ABC' inside a PDS to another PDS - zos_copy: - src: SOME.SRC.PDS(ABC*) - dest: SOME.DEST.PDS - remote_src: true - - - name: Allocate destination in a specific volume - zos_copy: - src: SOME.SRC.PDS - dest: SOME.DEST.PDS - volume: 'VOL033' - remote_src: true - - - name: Copy a USS file to a fully customized sequential data set - zos_copy: - src: /path/to/uss/src - dest: SOME.SEQ.DEST - remote_src: true - volume: '222222' - dest_data_set: - type: seq - space_primary: 10 - space_secondary: 3 - space_type: k - record_format: vb - record_length: 150 - - - name: Copy a Program Object and its aliases on a remote system to a new PDSE member MYCOBOL - zos_copy: - src: HLQ.COBOLSRC.PDSE(TESTPGM) - dest: HLQ.NEW.PDSE(MYCOBOL) - remote_src: true - executable: true - aliases: true - - - name: Copy a Load Library from a USS directory /home/loadlib to a new PDSE - zos_copy: - src: '/home/loadlib/' - dest: HLQ.LOADLIB.NEW - remote_src: true - executable: true - aliases: true - - - name: Copy a file with ASA characters to a new sequential data set. - zos_copy: - src: ./files/print.txt - dest: HLQ.PRINT.NEW - asa_text: true - - - name: Copy a file to a new generation data set. - zos_copy: - src: /path/to/uss/src - dest: HLQ.TEST.GDG(+1) - remote_src: true - - - name: Copy a local file and take a backup of the existing file with a GDS. - zos_copy: - src: /path/to/local/file - dest: /path/to/dest - backup: true - backup_name: HLQ.BACKUP.GDG(+1) - - - - -Notes ------ - -.. note:: - Destination data sets are assumed to be in catalog. When trying to copy to an uncataloged data set, the module assumes that the data set does not exist and will create it. - - Destination will be backed up if either ``backup`` is ``true`` or ``backup_name`` is provided. If ``backup`` is ``false`` but ``backup_name`` is provided, task will fail. - - When copying local files or directories, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file or directory being copied. Temporary files will always be deleted, regardless of success or failure of the copy task. - - VSAM data sets can only be copied to other VSAM data sets. 
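As a sketch of the backup behavior noted above (the data set names here are placeholders, not taken from the original examples), a task can request a backup and name it explicitly before the destination is replaced:

.. code-block:: yaml+jinja

   - name: Replace a sequential data set, backing up the old contents first
     zos_copy:
       src: /path/to/local/file
       dest: HLQ.EXISTING.SEQ
       backup: true
       backup_name: HLQ.EXISTING.SEQ.BKP
       force: true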
- - For supported character sets used to encode data, refer to the `documentation `_. - - This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - - Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option ``executable`` that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos_copy.html) error. - - It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - - If trying to copy a migrated data set, first recall it before executing this module. This module does not perform recalls automatically. See modules `zos_mvs_raw <./zos_mvs_raw.html>`_ and `zos_tso_cmd <./zos_tso_cmd.html>`_ for examples of how to recall migrated data sets using this collection. - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_fetch_module` - - :ref:`zos_data_set_module` - - :ref:`zos_mvs_raw_module` - - :ref:`zos_tso_cmd_module` - - - - -Return Values -------------- - - -src - Source file or data set being copied. - - | **returned**: changed - | **type**: str - | **sample**: /path/to/source.log - -dest - Destination file/path or data set name. - - | **returned**: success - | **type**: str - | **sample**: SAMPLE.SEQ.DATA.SET - -dest_created - Indicates whether the module created the destination. - - | **returned**: success and if dest was created by the module. - | **type**: bool - | **sample**: - - .. code-block:: json - - true - -destination_attributes - Attributes of a dest created by the module. - - | **returned**: success and destination was created by the module. - | **type**: dict - | **sample**: - - .. code-block:: json - - { - "block_size": 32760, - "record_format": "fb", - "record_length": 45, - "space_primary": 2, - "space_secondary": 1, - "space_type": "k", - "type": "pdse" - } - - block_size - Block size of the dataset. - - | **type**: int - | **sample**: 32760 - - record_format - Record format of the dataset. - - | **type**: str - | **sample**: fb - - record_length - Record length of the dataset. - - | **type**: int - | **sample**: 45 - - space_primary - Allocated primary space for the dataset. - - | **type**: int - | **sample**: 2 - - space_secondary - Allocated secondary space for the dataset. - - | **type**: int - | **sample**: 1 - - space_type - Unit of measurement for space. - - | **type**: str - | **sample**: k - - type - Type of dataset allocated. - - | **type**: str - | **sample**: pdse - - -checksum - SHA256 checksum of the file after running zos_copy. - - | **returned**: When ``validate=true`` and if ``dest`` is USS - | **type**: str - | **sample**: 8d320d5f68b048fc97559d771ede68b37a71e8374d1d678d96dcfa2b2da7a64e - -backup_name - Name of the backup file or data set that was created. 
- - | **returned**: if backup=true or backup_name=true - | **type**: str - | **sample**: /path/to/file.txt.2015-02-03@04:15~ - -gid - Group id of the file, after execution. - - | **returned**: success and if dest is USS - | **type**: int - | **sample**: 100 - -group - Group of the file, after execution. - - | **returned**: success and if dest is USS - | **type**: str - | **sample**: httpd - -owner - Owner of the file, after execution. - - | **returned**: success and if dest is USS - | **type**: str - | **sample**: httpd - -uid - Owner id of the file, after execution. - - | **returned**: success and if dest is USS - | **type**: int - | **sample**: 100 - -mode - Permissions of the target, after execution. - - | **returned**: success and if dest is USS - | **type**: str - | **sample**: 420 - -size - Size(in bytes) of the target, after execution. - - | **returned**: success and dest is USS - | **type**: int - | **sample**: 1220 - -state - State of the target, after execution. - - | **returned**: success and if dest is USS - | **type**: str - | **sample**: file - -note - A note to the user after module terminates. - - | **returned**: When ``force=true`` and ``dest`` exists - | **type**: str - | **sample**: No data was copied - -msg - Failure message returned by the module. - - | **returned**: failure - | **type**: str - | **sample**: Error while gathering data set information - -stdout - The stdout from a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: str - | **sample**: Copying local file /tmp/foo/src to remote path /tmp/foo/dest - -stderr - The stderr of a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: str - | **sample**: No such file or directory "/tmp/foo" - -stdout_lines - List of strings containing individual lines from stdout. - - | **returned**: failure - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "u\"Copying local file /tmp/foo/src to remote path /tmp/foo/dest..\"" - ] - -stderr_lines - List of strings containing individual lines from stderr. - - | **returned**: failure - | **type**: list - | **sample**: - - .. code-block:: json - - [ - { - "u\"FileNotFoundError": "No such file or directory \u0027/tmp/foo\u0027\"" - } - ] - -rc - The return code of a USS or MVS command, if applicable. - - | **returned**: failure - | **type**: int - | **sample**: 8 - -cmd - The MVS command issued, if applicable. - - | **returned**: failure - | **type**: str - | **sample**: REPRO INDATASET(SAMPLE.DATA.SET) OUTDATASET(SAMPLE.DEST.DATA.SET) - diff --git a/docs/source/modules/zos_data_set.rst b/docs/source/modules/zos_data_set.rst deleted file mode 100644 index 9578ad66a0..0000000000 --- a/docs/source/modules/zos_data_set.rst +++ /dev/null @@ -1,886 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_data_set.py - -.. _zos_data_set_module: - - -zos_data_set -- Manage data sets -================================ - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Create, delete and set attributes of data sets. -- When forcing data set replacement, contents will not be preserved. - - - - - -Parameters ----------- - - -name - The name of the data set being managed. (e.g ``USER.TEST``) - - If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - - Required if *type=member* or *state!=present* and not using *batch*. 
- - | **required**: False - | **type**: str - - -state - The final state desired for specified data set. - - If *state=absent* and the data set does not exist on the managed node, no action taken, module completes successfully with *changed=False*. - - - If *state=absent* and the data set does exist on the managed node, remove the data set, module completes successfully with *changed=True*. - - - If *state=absent* and *type=member* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. - - - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with *changed=True*. - - - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with *changed=False*. - - - If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with *changed=False*. - - - If *state=absent* and *type=gdg* and the GDG base has active generations the module will complete successfully with *changed=False*. To remove it option *force* needs to be used. If the GDG base does not have active generations the module will complete successfully with *changed=True*. - - - If *state=present* and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with *changed=True*. - - - If *state=present* and *replace=True* and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with *changed=True*. - - - If *state=present* and *replace=False* and the data set is present on the managed node, no action taken, module completes successfully with *changed=False*. - - - If *state=present* and *type=member* and the member does not exist in the data set, create a member formatted to store data, module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. - - - If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action taken, module completes successfully with *changed=False*. - - - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, module completes successfully with *changed=True*. - - - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. 
If the attempt to catalog the data set catalog fails, returns failure with *changed=False*. - - - If *state=uncataloged* and the data set is not found, no action taken, module completes successfully with *changed=False*. - - - If *state=uncataloged* and the data set is found, the data set is uncataloged, module completes successfully with *changed=True*. - - - If *state=present*, the data set is already cataloged and *volumes* is provided, the module will compare the volumes where it is cataloged against the provided *volumes*. If they don't match, the module will fail with an error indicating the data set is cataloged on a different volume. To resolve this, you must first uncatalog the data set before creating it on the new volume. - - - If *state=present*, the data set is already cataloged, *volumes* is provided, and the volumes match exactly, no action is taken and the module completes successfully with *changed=False*. - - - | **required**: False - | **type**: str - | **default**: present - | **choices**: present, absent, cataloged, uncataloged - - -type - The data set type to be used when creating a data set. (e.g ``pdse``). - - ``member`` expects to be used with an existing partitioned data set. - - Choices are case-sensitive. - - | **required**: False - | **type**: str - | **default**: pds - | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs, gdg - - -space_primary - The amount of primary space to allocate for the dataset. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - | **default**: 5 - - -space_secondary - The amount of secondary space to allocate for the dataset. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - | **default**: 3 - - -space_type - The unit of measurement to use when defining primary and secondary space. - - Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. - - | **required**: False - | **type**: str - | **default**: m - | **choices**: k, m, g, cyl, trk - - -record_format - The format of the data set. (e.g ``FB``) - - Choices are case-sensitive. - - When *type=ksds*, *type=esds*, *type=rrds*, *type=lds* or *type=zfs* then *record_format=None*, these types do not have a default *record_format*. - - | **required**: False - | **type**: str - | **default**: fb - | **choices**: fb, vb, fba, vba, u, f - - -sms_storage_class - The storage class for an SMS-managed dataset. - - Required for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - -sms_data_class - The data class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - -sms_management_class - The management class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - -record_length - The length, in bytes, of each record in the data set. - - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. 
- - | **required**: False - | **type**: int - - -block_size - The block size to use for the data set. - - | **required**: False - | **type**: int - - -directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - -key_offset - The key offset to use when creating a KSDS data set. - - *key_offset* is required when *type=ksds*. - - *key_offset* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - -key_length - The key length to use when creating a KSDS data set. - - *key_length* is required when *type=ksds*. - - *key_length* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - -empty - Sets the *empty* attribute for Generation Data Groups. - - If false, removes only the oldest GDS entry when a new GDS is created that causes GDG limit to be exceeded. - - If true, removes all GDS entries from a GDG base when a new GDS is created that causes the GDG limit to be exceeded. - - | **required**: False - | **type**: bool - | **default**: False - - -extended - Sets the *extended* attribute for Generation Data Groups. - - If false, allow up to 255 generation data sets (GDSs) to be associated with the GDG. - - If true, allow up to 999 generation data sets (GDS) to be associated with the GDG. - - | **required**: False - | **type**: bool - | **default**: False - - -fifo - Sets the *fifo* attribute for Generation Data Groups. - - If false, the order is the newest GDS defined to the oldest GDS. This is the default value. - - If true, the order is the oldest GDS defined to the newest GDS. - - | **required**: False - | **type**: bool - | **default**: False - - -limit - Sets the *limit* attribute for Generation Data Groups. - - Specifies the maximum number, from 1 to 255(up to 999 if extended), of GDS that can be associated with the GDG being defined. - - *limit* is required when *type=gdg*. - - | **required**: False - | **type**: int - - -purge - Sets the *purge* attribute for Generation Data Groups. - - Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. - - | **required**: False - | **type**: bool - | **default**: False - - -scratch - When ``state=absent``, specifies whether to physically remove the data set from the volume. - - If ``scratch=true``, the data set is deleted and its entry is removed from the volume's VTOC. - - If ``scratch=false``, the data set is uncataloged but not physically removed from the volume. This is the equivalent of using ``NOSCRATCH`` in an ``IDCAMS DELETE`` command. - - When ``state=present`` option **scratch** sets the *scratch* attribute for Generation Data Groups and is ignored for any other data set type. - - When ``state=present`` and ``type=GDG`` specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing. - - | **required**: False - | **type**: bool - - -volumes - If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. - - - If creating a data set, *volumes* specifies the volume(s) where the data set should be created. - - - If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. 
- - - If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - - - *volumes* is required when *state=cataloged*. - - Accepts a string when using a single volume and a list of strings when using multiple. - - | **required**: False - | **type**: raw - - -replace - When *replace=True*, and *state=present*, existing data set matching *name* will be replaced. - - Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - - - If *replace=True*, all data in the original data set will be lost. - - | **required**: False - | **type**: bool - | **default**: False - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - -force - Specifies that the data set can be shared with others during a member delete operation which results in the data set you are updating to be simultaneously updated by others. - - This is helpful when a data set is being used in a long running process such as a started task and you are wanting to delete a member. - - The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. - - The *force=True* only applies to data set members when *state=absent* and *type=member* and when removing a GDG base with active generations. - - If *force=True*, *type=gdg* and *state=absent* it will force remove a GDG base with active generations. - - | **required**: False - | **type**: bool - | **default**: False - - -batch - Batch can be used to perform operations on multiple data sets in a single module call. - - | **required**: False - | **type**: list - | **elements**: dict - - - name - The name of the data set being managed. (e.g ``USER.TEST``) - - If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. - - Required if *type=member* or *state!=present* - - | **required**: False - | **type**: str - - - state - The final state desired for specified data set. - - If *state=absent* and the data set does not exist on the managed node, no action taken, module completes successfully with *changed=False*. - - - If *state=absent* and the data set does exist on the managed node, remove the data set, module completes successfully with *changed=True*. - - - If *state=absent* and *type=member* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted. - - - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, then the data set is removed. Module completes successfully with *changed=True*. - - - If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to perform catalog using supplied *name* and *volumes*. 
If the attempt to catalog the data set catalog fails, then no action is taken. Module completes successfully with *changed=False*. - - - If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If they volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set be deleted is cataloged. The module will catalog the original data set on completion, if the attempts to catalog fail, no action is taken. Module completes successfully with *changed=False*. - - - If *state=present* and the data set does not exist on the managed node, create and catalog the data set, module completes successfully with *changed=True*. - - - If *state=present* and *replace=True* and the data set is present on the managed node the existing data set is deleted, and a new data set is created and cataloged with the desired attributes, module completes successfully with *changed=True*. - - - If *state=present* and *replace=False* and the data set is present on the managed node, no action taken, module completes successfully with *changed=False*. - - - If *state=present* and *type=member* and the member does not exist in the data set, create a member formatted to store data, module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there is executables (program objects) and data. The member created is formatted to store data, not an executable. - - - If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action taken, module completes successfully with *changed=False*. - - - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog is successful, module completes successfully with *changed=True*. - - - If *state=cataloged* and *volumes* is provided and the data set is not cataloged, module attempts to perform catalog using supplied *name* and *volumes*. If the attempt to catalog the data set catalog fails, returns failure with *changed=False*. - - - If *state=uncataloged* and the data set is not found, no action taken, module completes successfully with *changed=False*. - - - If *state=uncataloged* and the data set is found, the data set is uncataloged, module completes successfully with *changed=True*. - - - | **required**: False - | **type**: str - | **default**: present - | **choices**: present, absent, cataloged, uncataloged - - - type - The data set type to be used when creating a data set. (e.g ``pdse``) - - ``member`` expects to be used with an existing partitioned data set. - - Choices are case-sensitive. - - | **required**: False - | **type**: str - | **default**: pds - | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs, gdg - - - space_primary - The amount of primary space to allocate for the dataset. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - | **default**: 5 - - - space_secondary - The amount of secondary space to allocate for the dataset. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - | **default**: 3 - - - space_type - The unit of measurement to use when defining primary and secondary space. - - Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. 
- - | **required**: False - | **type**: str - | **default**: m - | **choices**: k, m, g, cyl, trk - - - record_format - The format of the data set. (e.g ``FB``) - - Choices are case-sensitive. - - When *type=ksds*, *type=esds*, *type=rrds*, *type=lds* or *type=zfs* then *record_format=None*, these types do not have a default *record_format*. - - | **required**: False - | **type**: str - | **default**: fb - | **choices**: fb, vb, fba, vba, u, f - - - sms_storage_class - The storage class for an SMS-managed dataset. - - Required for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_data_class - The data class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_management_class - The management class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - record_length - The length, in bytes, of each record in the data set. - - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - - | **required**: False - | **type**: int - - - block_size - The block size to use for the data set. - - | **required**: False - | **type**: int - - - directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - - key_offset - The key offset to use when creating a KSDS data set. - - *key_offset* is required when *type=ksds*. - - *key_offset* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - - key_length - The key length to use when creating a KSDS data set. - - *key_length* is required when *type=ksds*. - - *key_length* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - - empty - Sets the *empty* attribute for Generation Data Groups. - - If false, removes only the oldest GDS entry when a new GDS is created that causes GDG limit to be exceeded. - - If true, removes all GDS entries from a GDG base when a new GDS is created that causes the GDG limit to be exceeded. - - | **required**: False - | **type**: bool - | **default**: False - - - extended - Sets the *extended* attribute for Generation Data Groups. - - If false, allow up to 255 generation data sets (GDSs) to be associated with the GDG. - - If true, allow up to 999 generation data sets (GDS) to be associated with the GDG. - - | **required**: False - | **type**: bool - | **default**: False - - - fifo - Sets the *fifo* attribute for Generation Data Groups. - - If false, the order is the newest GDS defined to the oldest GDS. This is the default value. - - If true, the order is the oldest GDS defined to the newest GDS. - - | **required**: False - | **type**: bool - | **default**: False - - - limit - Sets the *limit* attribute for Generation Data Groups. - - Specifies the maximum number, from 1 to 255(up to 999 if extended), of GDS that can be associated with the GDG being defined. - - *limit* is required when *type=gdg*. 
- - - purge - Sets the *purge* attribute for Generation Data Groups. - - Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. - - | **required**: False - | **type**: bool - | **default**: False - - - scratch - When ``state=absent``, specifies whether to physically remove the data set from the volume. - - If ``scratch=true``, the data set is deleted and its entry is removed from the volume's VTOC. - - If ``scratch=false``, the data set is uncataloged but not physically removed from the volume. This is the equivalent of using ``NOSCRATCH`` in an ``IDCAMS DELETE`` command. - - The default is ``true`` for non-GDG data sets and ``false`` for GDG data sets. - - | **required**: False - | **type**: bool - - - volumes - If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. - - - If creating a data set, *volumes* specifies the volume(s) where the data set should be created. - - - If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. - - - If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. - - - *volumes* is required when *state=cataloged*. - - Accepts a string when using a single volume and a list of strings when using multiple. - - | **required**: False - | **type**: raw - - - replace - When *replace=True*, and *state=present*, an existing data set matching *name* will be replaced. - - Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. - - - If *replace=True*, all data in the original data set will be lost. - - | **required**: False - | **type**: bool - | **default**: False - - - force - Specifies that the data set can be shared with others during a member delete operation, which means the data set you are updating can be updated simultaneously by others. - - This is helpful when a data set is being used in a long-running process such as a started task and you want to delete a member. - - The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. - - *force=True* only applies to data set members when *state=absent* and *type=member*. - - | **required**: False - | **type**: bool - | **default**: False - - - - - -Attributes ---------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples -------- - -.. 
code-block:: yaml+jinja - - - - name: Create a sequential data set if it does not exist - zos_data_set: - name: someds.name.here - type: seq - state: present - - - name: Create a PDS data set if it does not exist - zos_data_set: - name: someds.name.here - type: pds - space_primary: 5 - space_type: m - record_format: fba - record_length: 25 - - - name: Attempt to replace a data set if it exists - zos_data_set: - name: someds.name.here - type: pds - space_primary: 5 - space_type: m - record_format: u - record_length: 25 - replace: true - - - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. - zos_data_set: - name: someds.name.here - type: pds - space_primary: 5 - space_type: m - record_format: u - record_length: 25 - volumes: "222222" - replace: true - - - name: Create an ESDS data set if it does not exist - zos_data_set: - name: someds.name.here - type: esds - - - name: Create a KSDS data set if it does not exist - zos_data_set: - name: someds.name.here - type: ksds - key_length: 8 - key_offset: 0 - - - name: Create an RRDS data set with storage class MYDATA if it does not exist - zos_data_set: - name: someds.name.here - type: rrds - sms_storage_class: mydata - - - name: Delete a data set if it exists - zos_data_set: - name: someds.name.here - state: absent - - - name: Uncatalog a data set but do not remove it from the volume. - zos_data_set: - name: someds.name.here - type: seq - state: absent - scratch: false - - - name: Delete a data set if it exists. If data set not cataloged, check on volume 222222 for the data set, and then catalog and delete if found. - zos_data_set: - name: someds.name.here - state: absent - volumes: "222222" - - - name: Write a member to an existing PDS; replace if member exists - zos_data_set: - name: someds.name.here(mydata) - type: member - replace: true - - - name: Write a member to an existing PDS; do not replace if member exists - zos_data_set: - name: someds.name.here(mydata) - type: member - - - name: Remove a member from an existing PDS - zos_data_set: - name: someds.name.here(mydata) - state: absent - type: member - - - name: Remove a member from an existing PDS/E by opening with disposition DISP=SHR - zos_data_set: - name: someds.name.here(mydata) - state: absent - type: member - force: true - - - name: Create multiple partitioned data sets and add one or more members to each - zos_data_set: - batch: - - name: someds.name.here1 - type: pds - space_primary: 5 - space_type: m - record_format: fb - replace: true - - name: someds.name.here1(member1) - type: member - - name: someds.name.here2(member1) - type: member - replace: true - - name: someds.name.here2(member2) - type: member - - - name: Catalog a data set present on volume 222222 if it is uncataloged. - zos_data_set: - name: someds.name.here - state: cataloged - volumes: "222222" - - - name: Uncatalog a data set if it is cataloged. - zos_data_set: - name: someds.name.here - state: uncataloged - - - name: Create a data set on volumes 000000 and 222222 if it does not exist. - zos_data_set: - name: someds.name.here - state: present - volumes: - - "000000" - - "222222" - - - - - - - - - - -Return Values -------------- - - -names - The data set names, including temporary generated data set names, in the order provided to the module. 
- - | **returned**: always - | **type**: list - | **elements**: str - diff --git a/docs/source/modules/zos_encode.rst b/docs/source/modules/zos_encode.rst deleted file mode 100644 index 4a5e61f798..0000000000 --- a/docs/source/modules/zos_encode.rst +++ /dev/null @@ -1,335 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_encode.py - -.. _zos_encode_module: - - -zos_encode -- Perform encoding operations. -========================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Converts the encoding of characters that are read from a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, or KSDS (VSAM data set). -- Writes the data to a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, or KSDS (VSAM data set). - - - - - -Parameters ----------- - - -encoding - Specifies which encodings the destination file or data set should be converted from and to. - - Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. - - | **required**: False - | **type**: dict - - - from - The character set of the source *src*. - - | **required**: False - | **type**: str - | **default**: IBM-1047 - - - to - The destination *dest* character set for the output to be written as. - - | **required**: False - | **type**: str - | **default**: ISO8859-1 - - - -src - The location can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, a generation data set (GDS) or KSDS (VSAM data set). - - The USS path or file must be an absolute pathname. - - If *src* is a USS directory, all files will be encoded. - - Encoding a whole generation data group (GDG) is not supported. - - | **required**: True - | **type**: str - - -dest - The location where the converted characters are output. - - The destination *dest* can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, a generation data set (GDS) or KSDS (VSAM data set). - - If the length of the PDSE member name used in *dest* is greater than 8 characters, the member name will be truncated when written out. - - If *dest* is not specified, the *src* will be used as the destination and will overwrite the *src* with the character set in the option *to_encoding*. - - The USS file or path must be an absolute pathname. - - If *dest* is a data set, it must be already allocated. - - | **required**: False - | **type**: str - - -backup - Creates a backup file or backup data set for *dest*, including the timestamp information to ensure that you retrieve the original file. - - *backup_name* can be used to specify a backup file name if *backup=true*. - - | **required**: False - | **type**: bool - | **default**: False - - -backup_name - Specify the USS file name or data set name for the dest backup. - - If dest is a USS file or path, *backup_name* must be a file or path name, and the USS path or file must be an absolute pathname. - - If dest is an MVS data set, the *backup_name* must be an MVS data set name. - - If *backup_name* is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. 
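For instance, a fixed backup name can be supplied instead of the generated default; a minimal sketch using only the options documented here (the data set names are illustrative):

.. code-block:: yaml+jinja

   - name: Convert encoding and keep a named backup of the destination
     zos_encode:
       src: /zos_encode/test.data
       dest: USER.TEST.PS
       encoding:
         from: ISO8859-1
         to: IBM-1047
       backup: true
       backup_name: USER.TEST.PS.BKUP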
- - ``backup_name`` will be returned on either success or failure of module execution such that data can be retrieved. - - If *backup_name* is a generation data set (GDS), it must be a relative positive name (for example, V(HLQ.USER.GDG(+1\))). - - | **required**: False - | **type**: str - - -backup_compress - Determines if backups to USS files or paths should be compressed. - - *backup_compress* is only used when *backup=true*. - - | **required**: False - | **type**: bool - | **default**: False - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Convert file encoding from IBM-1047 to ISO8859-1 for the same file - zos_encode: - src: /zos_encode/test.data - - - name: Convert file encoding from IBM-1047 to ISO8859-1 to another file with - backup - zos_encode: - src: /zos_encode/test.data - dest: /zos_encode_out/test.out - encoding: - from: IBM-1047 - to: ISO8859-1 - backup: true - backup_compress: true - - - name: Convert file encoding from IBM-1047 to ISO8859-1 to a directory - zos_encode: - src: /zos_encode/test.data - dest: /zos_encode_out/ - - - name: Convert file encoding from all files in a directory to another - directory - zos_encode: - src: /zos_encode/ - dest: /zos_encode_out/ - encoding: - from: ISO8859-1 - to: IBM-1047 - - - name: Convert file encoding from a USS file to a sequential data set - zos_encode: - src: /zos_encode/test.data - dest: USER.TEST.PS - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from files in a directory to a partitioned - data set - zos_encode: - src: /zos_encode/ - dest: USER.TEST.PDS - encoding: - from: ISO8859-1 - to: IBM-1047 - - - name: Convert file encoding from a USS file to a partitioned data set - member - zos_encode: - src: /zos_encode/test.data - dest: USER.TEST.PDS(TESTDATA) - encoding: - from: ISO8859-1 - to: IBM-1047 - - - name: Convert file encoding from a sequential data set to a USS file - zos_encode: - src: USER.TEST.PS - dest: /zos_encode/test.data - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from a PDS encoding to a USS directory - zos_encode: - src: USER.TEST.PDS - dest: /zos_encode/ - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from a sequential data set to another - sequential data set - zos_encode: - src: USER.TEST.PS - dest: USER.TEST1.PS - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from a sequential data set to a - partitioned data set (extended) member - zos_encode: - src: USER.TEST.PS - dest: USER.TEST1.PDS(TESTDATA) - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from a USS file to a VSAM data set - zos_encode: - src: /zos_encode/test.data - dest: USER.TEST.VS - encoding: - from: ISO8859-1 - to: IBM-1047 - - - 
name: Convert file encoding from a VSAM data set to a USS file - zos_encode: - src: USER.TEST.VS - dest: /zos_encode/test.data - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from a VSAM data set to a sequential - data set - zos_encode: - src: USER.TEST.VS - dest: USER.TEST.PS - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Convert file encoding from a sequential data set to a VSAM data set - zos_encode: - src: USER.TEST.PS - dest: USER.TEST.VS - encoding: - from: ISO8859-1 - to: IBM-1047 - - - name: Convert file encoding from a USS file to a generation data set - zos_encode: - src: /zos_encode/test.data - dest: USER.TEST.GDG(0) - encoding: - from: ISO8859-1 - to: IBM-1047 - - - name: Convert file encoding from a USS file to a data set while using a GDG for backup - zos_encode: - src: /zos_encode/test.data - dest: USER.TEST.PS - encoding: - from: ISO8859-1 - to: IBM-1047 - backup: true - backup_name: USER.BACKUP.GDG(+1) - - - - -Notes ----- - -.. note:: - It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - - All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. - - For supported character sets used to encode data, refer to the `documentation `_. - - - - - - - -Return Values ------------- - - -src - The location of the input characters identified in option *src*. - - | **returned**: always - | **type**: str - -dest - The name of the output file or data set. If dest is a USS file or path and the status has been changed in the conversion, the file status will also be returned. - - | **returned**: always - | **type**: str - -backup_name - Name of the backup file created. - - | **returned**: changed and if backup=yes - | **type**: str - | **sample**: /path/file_name.2020-04-23-08-32-29-bak.tar - diff --git a/docs/source/modules/zos_fetch.rst b/docs/source/modules/zos_fetch.rst deleted file mode 100644 index 8a341dfcdc..0000000000 --- a/docs/source/modules/zos_fetch.rst +++ /dev/null @@ -1,359 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_fetch.py - -.. _zos_fetch_module: - - -zos_fetch -- Fetch data from z/OS ================================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis -------- -- This module fetches a UNIX System Services (USS) file, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, generation data set (GDS), generation data group (GDG), or KSDS (VSAM data set) from a remote z/OS system. -- When fetching a sequential data set, the destination file name will be the same as the data set name. -- When fetching a PDS or PDSE, the destination will be a directory with the same name as the PDS or PDSE. -- When fetching a PDS/PDSE member, the destination will be a file. -- Files that already exist at ``dest`` will be overwritten if they are different from ``src``. -- When fetching a GDS, the relative name will be resolved to its absolute one. -- When fetching a generation data group, the destination will be a directory with the same name as the GDG. - - - - - -Parameters ---------- - - -src - Name of a UNIX System Services (USS) file, PS (sequential data set), PDS, PDSE, member of a PDS, PDSE, GDS, GDG or KSDS (VSAM data set).
- - USS file paths should be absolute paths. - - | **required**: True - | **type**: str - - -dest - Local path where the file or data set will be stored. - - If dest is an existing file or directory, the contents will be overwritten. - - | **required**: True - | **type**: path - - -fail_on_missing - When set to true, the task will fail if the source file is missing. - - | **required**: False - | **type**: bool - | **default**: true - - -validate_checksum - Verify that the source and destination checksums match after the files are fetched. - - | **required**: False - | **type**: bool - | **default**: true - - -flat - If set to "true", override the default behavior of appending hostname/path/to/file to the destination, instead the file or data set will be fetched to the destination directory without appending remote hostname to the destination. - - | **required**: False - | **type**: bool - | **default**: false - - -is_binary - Specifies if the file being fetched is a binary. - - | **required**: False - | **type**: bool - | **default**: false - - -use_qualifier - Indicates whether the data set high level qualifier should be used when fetching. - - | **required**: False - | **type**: bool - | **default**: false - - -encoding - Specifies which encodings the fetched data set should be converted from and to. If this parameter is not provided, encoding conversions will not take place. - - | **required**: False - | **type**: dict - - - from - The character set of the source *src*. - - Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. - - | **required**: True - | **type**: str - - - to - The destination *dest* character set for the output to be written as. - - Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. - - | **required**: True - | **type**: str - - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - -ignore_sftp_stderr - During data transfer through SFTP, the SFTP command directs content to stderr. By default, the module essentially ignores the stderr stream produced by SFTP and continues execution. The user is able to override this behavior by setting this parameter to ``false``. By doing so, any content written to stderr is considered an error by Ansible and will cause the module to fail. - - When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. - - | **required**: False - | **type**: bool - | **default**: True - - - - -Attributes ----------- -action - | **support**: full - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: none - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. 
code-block:: yaml+jinja - - - name: Fetch file from USS and store in /tmp/fetched/hostname/tmp/somefile - zos_fetch: - src: /tmp/somefile - dest: /tmp/fetched - - - name: Fetch a sequential data set and store in /tmp/SOME.DATA.SET - zos_fetch: - src: SOME.DATA.SET - dest: /tmp/ - flat: true - - - name: Fetch a PDS as binary and store in /tmp/SOME.PDS.DATASET - zos_fetch: - src: SOME.PDS.DATASET - dest: /tmp/ - flat: true - is_binary: true - - - name: Fetch a UNIX file and don't validate its checksum - zos_fetch: - src: /tmp/somefile - dest: /tmp/ - flat: true - validate_checksum: false - - - name: Fetch a VSAM data set - zos_fetch: - src: USER.TEST.VSAM - dest: /tmp/ - flat: true - - - name: Fetch a PDS member named 'DATA' - zos_fetch: - src: USER.TEST.PDS(DATA) - dest: /tmp/ - flat: true - - - name: Fetch a USS file and convert from IBM-037 to ISO8859-1 - zos_fetch: - src: /etc/profile - dest: /tmp/ - encoding: - from: IBM-037 - to: ISO8859-1 - flat: true - - - name: Fetch the current generation data set from a GDG - zos_fetch: - src: USERHLQ.DATA.SET(0) - dest: /tmp/ - flat: true - - - name: Fetch a previous generation data set from a GDG - zos_fetch: - src: USERHLQ.DATA.SET(-3) - dest: /tmp/ - flat: true - - - name: Fetch a generation data group - zos_fetch: - src: USERHLQ.TEST.GDG - dest: /tmp/ - flat: true - - - - -Notes ----- - -.. note:: - When fetching PDSE and VSAM data sets, temporary storage will be used on the remote z/OS system. After the PDSE or VSAM data set is successfully transferred, the temporary storage will be deleted. The size of the temporary storage will correspond to the size of the PDSE or VSAM data set being fetched. If module execution fails, the temporary storage will be deleted. - - To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the ``checksum`` parameter. - - All data sets are always assumed to be cataloged. If an uncataloged data set needs to be fetched, it should be cataloged first. - - Fetching HFS or ZFS type data sets is currently not supported. - - For supported character sets used to encode data, refer to the `documentation `_. - - This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z, thus falling back to standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail. - - - -See Also -------- - -.. seealso:: - - - :ref:`zos_data_set_module` - - :ref:`zos_copy_module` - - - - -Return Values ------------- - - -file - The source file path or data set on the remote machine. - - | **returned**: success - | **type**: str - | **sample**: SOME.DATA.SET - -dest - The destination file path on the controlling machine. - - | **returned**: success - | **type**: str - | **sample**: /tmp/SOME.DATA.SET - -is_binary - Indicates the transfer mode that was used to fetch. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - -checksum - The SHA256 checksum of the fetched file or data set. Checksum validation is performed for all USS files and sequential data sets.
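As a sketch, the reported checksum can be captured with ``register`` and inspected afterwards (the variable and data set names are illustrative):

.. code-block:: yaml+jinja

   - name: Fetch a sequential data set and record its checksum
     zos_fetch:
       src: SOME.DATA.SET
       dest: /tmp/
       flat: true
     register: fetch_result

   - name: Show the checksum reported by the module
     debug:
       msg: "SHA256: {{ fetch_result.checksum }}"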
- - | **returned**: success and src is a non-partitioned data set - | **type**: str - | **sample**: 8d320d5f68b048fc97559d771ede68b37a71e8374d1d678d96dcfa2b2da7a64e - -data_set_type - Indicates the fetched data set type. - - | **returned**: success - | **type**: str - | **sample**: PDSE - -note - Notice of module failure when ``fail_on_missing`` is false. - - | **returned**: failure and fail_on_missing=false - | **type**: str - | **sample**: The data set USER.PROCLIB does not exist. No data was fetched. - -msg - Message returned on failure. - - | **returned**: failure - | **type**: str - | **sample**: The source 'TEST.DATA.SET' does not exist or is uncataloged. - -stdout - The stdout from a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: str - | **sample**: DATA SET 'USER.PROCLIB' NOT IN CATALOG - -stderr - The stderr of a USS command or MVS command, if applicable - - | **returned**: failure - | **type**: str - | **sample**: File /tmp/result.log not found. - -stdout_lines - List of strings containing individual lines from stdout - - | **returned**: failure - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "u\u0027USER.TEST.PDS NOT IN CATALOG..\u0027" - ] - -stderr_lines - List of strings containing individual lines from stderr. - - | **returned**: failure - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "u\u0027Unable to traverse PDS USER.TEST.PDS not found\u0027" - ] - -rc - The return code of a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: int - | **sample**: 8 - diff --git a/docs/source/modules/zos_find.rst b/docs/source/modules/zos_find.rst deleted file mode 100644 index 1c3d5222c1..0000000000 --- a/docs/source/modules/zos_find.rst +++ /dev/null @@ -1,398 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_find.py - -.. _zos_find_module: - - -zos_find -- Find matching data sets -=================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Return a list of data sets based on specific criteria. -- Multiple criteria can be added (AND'd) together. -- The ``zos_find`` module can only find MVS data sets. Use the `find `_ module to find USS files. - - - - - -Parameters ----------- - - -age - Select data sets whose age is equal to or greater than the specified time. - - Use a negative age to find data sets equal to or less than the specified time. - - You can choose days, weeks, months or years by specifying the first letter of any of those words (e.g., "1w"). The default is days. - - Age is determined by using the 'referenced date' of the data set. - - | **required**: False - | **type**: str - - -age_stamp - Choose the age property against which to compare age. - - ``creation_date`` is the date the data set was created and ``ref_date`` is the date the data set was last referenced. - - ``ref_date`` is only applicable to sequential and partitioned data sets. - - | **required**: False - | **type**: str - | **default**: creation_date - | **choices**: creation_date, ref_date - - -contains - A string which should be matched against the data set content or data set member content. - - | **required**: False - | **type**: str - - -excludes - Data sets whose names match an excludes pattern are culled from patterns matches. Multiple patterns can be specified using a list. - - The pattern can be a regular expression. - - If the pattern is a regular expression, it must match the full data set name. 
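As an illustration of combining these filters, the sketch below pairs a reference-date age filter with an excludes pattern (the patterns are illustrative):

.. code-block:: yaml+jinja

   - name: Find data sets not referenced in the last 4 weeks, skipping temporary ones
     zos_find:
       patterns:
         - USER.*
       age: 4w
       age_stamp: ref_date
       excludes:
         - '.*TEMP'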
- - | **required**: False - | **type**: list - | **elements**: str - - -patterns - One or more data set or member patterns. - - The patterns restrict the list of data sets or members to be returned to those names that match at least one of the patterns specified. Multiple patterns can be specified using a list. - - This parameter expects a list, which can be either comma separated or YAML. - - If ``pds_patterns`` is provided, ``patterns`` must be member patterns. - - When searching for members within a PDS/PDSE, pattern can be a regular expression. - - | **required**: True - | **type**: list - | **elements**: str - - -size - Select data sets whose size is equal to or greater than the specified size. - - Use a negative size to find files equal to or less than the specified size. - - Unqualified values are in bytes but b, k, m, g, and t can be appended to specify bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively. - - Filtering by size is currently only valid for sequential and partitioned data sets. - - | **required**: False - | **type**: str - - -pds_patterns - List of PDS/PDSE to search. Wildcard is possible. - - Required when searching for data set members. - - Valid only for ``nonvsam`` resource types. Otherwise ignored. - - | **required**: False - | **type**: list - | **elements**: str - - -resource_type - The types of resources to search. - - ``nonvsam`` refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF. - - ``cluster`` refers to a VSAM cluster. The ``data`` and ``index`` are the data and index components of a VSAM cluster. - - ``gdg`` refers to Generation Data Groups. The module searches based on the GDG base name. - - ``migrated`` refers to listing migrated datasets. Only ``excludes`` and ``migrated_type`` options can be used along with this option. The module only searches based on dataset patterns. - - | **required**: False - | **type**: list - | **elements**: str - | **default**: nonvsam - | **choices**: nonvsam, cluster, data, index, gdg, migrated - - -migrated_type - A migrated data set related attribute, only valid when ``resource_type=migrated``. - - If provided, will search for only those types of migrated datasets. - - | **required**: False - | **type**: list - | **elements**: str - | **default**: ['cluster', 'data', 'index', 'nonvsam'] - | **choices**: nonvsam, cluster, data, index - - -volume - If provided, only the data sets allocated in the specified list of volumes will be searched. - - | **required**: False - | **type**: list - | **elements**: str - - -empty - A GDG attribute, only valid when ``resource_type=gdg``. - - If provided, will search for data sets with *empty* attribute set as provided. - - | **required**: False - | **type**: bool - - -extended - A GDG attribute, only valid when ``resource_type=gdg``. - - If provided, will search for data sets with *extended* attribute set as provided. - - | **required**: False - | **type**: bool - - -fifo - A GDG attribute, only valid when ``resource_type=gdg``. - - If provided, will search for data sets with *fifo* attribute set as provided. - - | **required**: False - | **type**: bool - - -limit - A GDG attribute, only valid when ``resource_type=gdg``. - - If provided, will search for data sets with *limit* attribute set as provided. - - | **required**: False - | **type**: int - - -purge - A GDG attribute, only valid when ``resource_type=gdg``. - - If provided, will search for data sets with *purge* attribute set as provided. 
- - | **required**: False - | **type**: bool - - -scratch - A GDG attribute, only valid when ``resource_type=gdg``. - - If provided, will search for data sets with *scratch* attribute set as provided. - - | **required**: False - | **type**: bool - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Find all data sets with HLQ 'IMS.LIB' or 'IMSTEST.LIB' that contain the word 'hello' - zos_find: - patterns: - - IMS.LIB.* - - IMSTEST.LIB.* - contains: 'hello' - age: 2d - - - name: Search for 'rexx' in all datasets matching IBM.TSO.*.C?? - zos_find: - patterns: - - IBM.TSO.*.C?? - contains: 'rexx' - - - name: Exclude data sets that have a low level qualifier 'TEST' - zos_find: - patterns: 'IMS.LIB.*' - contains: 'hello' - excludes: '.*TEST' - - - name: Find all members starting with characters 'TE' in a given list of PDS patterns - zos_find: - patterns: '^te.*' - pds_patterns: - - IMSTEST.TEST.* - - IMSTEST.USER.* - - USER.*.LIB - - - name: Find all data sets greater than 2MB and allocated in one of the specified volumes - zos_find: - patterns: 'USER.*' - size: 2m - volumes: - - SCR03 - - IMSSUN - - - name: Find all VSAM clusters starting with the word 'USER' - zos_find: - patterns: - - USER.* - resource_type: - - 'cluster' - - - name: Find all Generation Data Groups starting with the word 'USER' and specific GDG attributes. - zos_find: - patterns: - - USER.* - resource_type: - - 'gdg' - limit: 30 - scratch: true - purge: true - - - name: Find all migrated and nonvsam data sets starting with the word 'USER' - zos_find: - patterns: - - USER.* - resource_type: - - 'migrated' - migrated_type: - - 'nonvsam' - - - - -Notes ------ - -.. note:: - Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets. - - The `zos_find <./zos_find.html>`_ module currently does not support wildcards for high level qualifiers. For example, ``SOME.*.DATA.SET`` is a valid pattern, but ``*.DATA.SET`` is not. - - If a data set pattern is specified as ``USER.*``, the matching data sets will have two name segments such as ``USER.ABC``, ``USER.XYZ`` etc. If a wildcard is specified as ``USER.*.ABC``, the matching data sets will have three name segments such as ``USER.XYZ.ABC``, ``USER.TEST.ABC`` etc. - - The time taken to execute the module is proportional to the number of data sets present on the system and how large the data sets are. - - When searching for content within data sets, only non-binary content is considered. - - As a migrated data set's information can't be retrieved without recalling it first, other options besides ``excludes`` and ``migrated_type`` are not supported. - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_data_set_module` - - - - -Return Values -------------- - - -data_sets - All matches found with the specified criteria. - - | **returned**: success - | **type**: list - | **sample**: - - .. 
code-block:: json - - [ - { - "members": { - "COBU": null, - "MC2CNAM": null, - "TINAD": null - }, - "name": "IMS.CICS13.USERLIB", - "type": "NONVSAM" - }, - { - "name": "SAMPLE.DATA.SET", - "type": "CLUSTER" - }, - { - "name": "SAMPLE.VSAM.DATA", - "type": "DATA" - } - ] - -matched - The number of matched data sets found. - - | **returned**: success - | **type**: int - | **sample**: 49 - -examined - The number of data sets searched. - - | **returned**: success - | **type**: int - | **sample**: 158 - -msg - Failure message returned by the module. - - | **returned**: failure - | **type**: str - | **sample**: Error while gathering data set information - -stdout - The stdout from a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: str - | **sample**: Searching dataset IMSTESTL.COMNUC - -stderr - The stderr of a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: str - | **sample**: No such file or directory "/tmp/foo" - -rc - The return code of a USS or MVS command, if applicable. - - | **returned**: failure - | **type**: int - | **sample**: 8 - diff --git a/docs/source/modules/zos_gather_facts.rst b/docs/source/modules/zos_gather_facts.rst deleted file mode 100644 index 1821906a4f..0000000000 --- a/docs/source/modules/zos_gather_facts.rst +++ /dev/null @@ -1,138 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_gather_facts.py - -.. _zos_gather_facts_module: - - -zos_gather_facts -- Gather z/OS system facts. -============================================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Retrieve variables from target z/OS systems. -- Variables are added to the *ansible_facts* dictionary, available to playbooks. -- Apply filters on the *gather_subset* list to reduce the variables that are added to the *ansible_facts* dictionary. -- Note, the module will fail fast if any unsupported options are provided. This is done to raise awareness of a failure in an automation setting. - - - - - -Parameters ----------- - - -gather_subset - If specified, it will collect facts that come under the specified subset (eg. ipl will return ipl facts). Specifying subsets is recommended to reduce time in gathering facts when the facts needed are in a specific subset. - - The following subsets are available ``ipl``, ``cpu``, ``sys``, and ``iodf``. Depending on the version of ZOAU, additional subsets may be available. - - | **required**: False - | **type**: list - | **elements**: str - | **default**: ['all'] - - -filter - Filter out facts from the *ansible_facts* dictionary. - - Uses shell-style `fnmatch `_ pattern matching to filter out the collected facts. - - An empty list means 'no filter', same as providing '*'. - - Filtering is performed after the facts are gathered such that no compute is saved when filtering. Filtering only reduces the number of variables that are added to the *ansible_facts* dictionary. To restrict the facts that are collected, refer to the *gather_subset* parameter. - - | **required**: False - | **type**: list - | **elements**: str - | **default**: [] - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. 
-check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Return all available z/OS facts. - ibm.ibm_zos_core.zos_gather_facts: - - - name: Return z/OS facts in the systems subset ('sys'). - ibm.ibm_zos_core.zos_gather_facts: - gather_subset: sys - - - name: Return z/OS facts in the subsets ('ipl' and 'sys') and filter out all - facts that do not match 'parmlib'. - ibm.ibm_zos_core.zos_gather_facts: - gather_subset: - - ipl - - sys - filter: - - "*parmlib*" - - - - - - - - - - -Return Values -------------- - - -ansible_facts - Collection of facts that are gathered from the z/OS systems. - - | **returned**: when collected - | **type**: dict - | **sample**: - - .. code-block:: json - - [ - { - "ansible_facts": { - "arch_level": "2", - "hw_name": "SYSZD6", - "ipl_volume": "RES820", - "lpar_name": "SVLLAB01", - "primary_jes": "JES2", - "product_mod_level": "00", - "product_name": "z/OS", - "product_owner": "IBM CORP", - "product_release": "05", - "product_version": "02", - "smf_name": "3090", - "sys_name": "EC33018A", - "sysplex_name": "SVPLEX1", - "vm_name": "EC33018A" - } - } - ] - diff --git a/docs/source/modules/zos_job_output.rst b/docs/source/modules/zos_job_output.rst deleted file mode 100644 index 6b992d02dc..0000000000 --- a/docs/source/modules/zos_job_output.rst +++ /dev/null @@ -1,523 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_job_output.py - -.. _zos_job_output_module: - - -zos_job_output -- Display job output -==================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Display the z/OS job output for a given criteria (Job id/Job name/owner) with/without a data definition name as a filter. -- At least provide a job id/job name/owner. -- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC*" or "*". -- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP*" or "*". -- The owner can be specific such as "IBMUSER", or one that uses a pattern like "*". -- If there is no ddname, or if ddname="?", output of all the ddnames under the given job will be displayed. -- If SYSIN DDs are needed, *sysin_dd* should be set to ``true``. - - - - - -Parameters ----------- - - -job_id - The z/OS job ID of the job containing the spool file. (e.g "STC02560", "STC*") - - | **required**: False - | **type**: str - - -job_name - The name of the batch job. (e.g "TCPIP", "C*") - - | **required**: False - | **type**: str - - -owner - The owner who ran the job. (e.g "IBMUSER", "*") - - | **required**: False - | **type**: str - - -ddname - Data definition name (show only this DD on a found job). (e.g "JESJCL", "?") - - | **required**: False - | **type**: str - - -sysin_dd - Whether to include SYSIN DDs as part of the output. - - | **required**: False - | **type**: bool - | **default**: False - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. 
If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Job output with ddname - zos_job_output: - job_id: "STC02560" - ddname: "JESMSGLG" - - - name: JES Job output without ddname - zos_job_output: - job_id: "STC02560" - - - name: JES Job output with all ddnames - zos_job_output: - job_id: "STC*" - job_name: "*" - owner: "IBMUSER" - ddname: "?" - - - name: Query a job's output including SYSIN DDs - zos_job_output: - job_id: "JOB00548" - sysin_dd: true - - - - - - - - - - -Return Values -------------- - - -jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret_code dictionary with parameter msg_txt = The job could not be found. - - | **returned**: success - | **type**: list - | **elements**: dict - | **sample**: - - .. code-block:: json - - [ - { - "class": "R", - "content_type": "JOB", - "cpu_time": 1414, - "ddnames": [ - { - "byte_count": "775", - "content": [ - "1 J E S 2 J O B L O G -- S Y S T E M S T L 1 -- N O D E S T L 1 ", - "0 ", - " 10.25.48 JOB00134 ---- TUESDAY, 18 FEB 2020 ----", - " 10.25.48 JOB00134 IRR010I USERID OMVSADM IS ASSIGNED TO THIS JOB.", - " 10.25.48 JOB00134 $HASP375 JES2 ESTIMATED LINES EXCEEDED", - " 10.25.48 JOB00134 ICH70001I OMVSADM LAST ACCESS AT 10:25:47 ON TUESDAY, FEBRUARY 18, 2020", - " 10.25.48 JOB00134 $HASP375 HELLO ESTIMATED LINES EXCEEDED", - " 10.25.48 JOB00134 $HASP373 HELLO STARTED - INIT 3 - CLASS R - SYS STL1", - " 10.25.48 JOB00134 SMF000I HELLO STEP0001 IEBGENER 0000", - " 10.25.48 JOB00134 $HASP395 HELLO ENDED - RC=0000", - "0------ JES2 JOB STATISTICS ------", - "- 18 FEB 2020 JOB EXECUTION DATE", - "- 16 CARDS READ", - "- 59 SYSOUT PRINT RECORDS", - "- 0 SYSOUT PUNCH RECORDS", - "- 6 SYSOUT SPOOL KBYTES", - "- 0.00 MINUTES EXECUTION TIME" - ], - "ddname": "JESMSGLG", - "id": "2", - "procstep": "", - "record_count": "17", - "stepname": "JES2" - }, - { - "byte_count": "574", - "content": [ - " 1 //HELLO JOB (T043JM,JM00,1,0,0,0),\u0027HELLO WORLD - JRM\u0027,CLASS=R, JOB00134", - " // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM ", - " //* ", - " //* PRINT \"HELLO WORLD\" ON JOB OUTPUT ", - " //* ", - " //* NOTE THAT THE EXCLAMATION POINT IS INVALID EBCDIC FOR JCL ", - " //* AND WILL CAUSE A JCL ERROR ", - " //* ", - " 2 //STEP0001 EXEC PGM=IEBGENER ", - " 3 //SYSIN DD DUMMY ", - " 4 //SYSPRINT DD SYSOUT=* ", - " 5 //SYSUT1 DD * ", - " 6 //SYSUT2 DD SYSOUT=* ", - " 7 // " - ], - "ddname": "JESJCL", - "id": "3", - "procstep": "", - "record_count": "14", - "stepname": "JES2" - }, - { - "byte_count": "1066", - "content": [ - " ICH70001I OMVSADM LAST ACCESS AT 10:25:47 ON TUESDAY, FEBRUARY 18, 2020", - " IEF236I ALLOC. FOR HELLO STEP0001", - " IEF237I DMY ALLOCATED TO SYSIN", - " IEF237I JES2 ALLOCATED TO SYSPRINT", - " IEF237I JES2 ALLOCATED TO SYSUT1", - " IEF237I JES2 ALLOCATED TO SYSUT2", - " IEF142I HELLO STEP0001 - STEP WAS EXECUTED - COND CODE 0000", - " IEF285I OMVSADM.HELLO.JOB00134.D0000102.? SYSOUT ", - " IEF285I OMVSADM.HELLO.JOB00134.D0000101.? SYSIN ", - " IEF285I OMVSADM.HELLO.JOB00134.D0000103.? 
SYSOUT ", - " IEF373I STEP/STEP0001/START 2020049.1025", - " IEF032I STEP/STEP0001/STOP 2020049.1025 ", - " CPU: 0 HR 00 MIN 00.00 SEC SRB: 0 HR 00 MIN 00.00 SEC ", - " VIRT: 60K SYS: 240K EXT: 0K SYS: 11548K", - " ATB- REAL: 8K SLOTS: 0K", - " VIRT- ALLOC: 10M SHRD: 0M", - " IEF375I JOB/HELLO /START 2020049.1025", - " IEF033I JOB/HELLO /STOP 2020049.1025 ", - " CPU: 0 HR 00 MIN 00.00 SEC SRB: 0 HR 00 MIN 00.00 SEC " - ], - "ddname": "JESYSMSG", - "id": "4", - "procstep": "", - "record_count": "19", - "stepname": "JES2" - }, - { - "byte_count": "251", - "content": [ - "1DATA SET UTILITY - GENERATE PAGE 0001 ", - "-IEB352I WARNING: ONE OR MORE OF THE OUTPUT DCB PARMS COPIED FROM INPUT ", - " ", - " PROCESSING ENDED AT EOD " - ], - "ddname": "SYSPRINT", - "id": "102", - "procstep": "", - "record_count": "4", - "stepname": "STEP0001" - }, - { - "byte_count": "49", - "content": [ - " HELLO, WORLD " - ], - "ddname": "SYSUT2", - "id": "103", - "procstep": "", - "record_count": "1", - "stepname": "STEP0001" - } - ], - "duration": 0, - "execution_node": "STL1", - "execution_time": "00:00:03", - "job_class": "R", - "job_id": "JOB00134", - "job_name": "HELLO", - "origin_node": "STL1", - "owner": "OMVSADM", - "priority": "1", - "program_name": "IEBGENER", - "queue_position": "58", - "ret_code": { - "code": 0, - "msg": "CC 0000", - "msg_code": "0000", - "msg_txt": "", - "steps": [ - { - "step_cc": 0, - "step_name": "STEP0001" - } - ] - }, - "subsystem": "STL1", - "system": "STL1" - } - ] - - job_id - The z/OS job ID of the job containing the spool file. - - | **type**: str - | **sample**: JOB00134 - - job_name - The name of the batch job. - - | **type**: str - | **sample**: HELLO - - system - The job entry system that MVS uses to do work. - - | **type**: str - | **sample**: STL1 - - subsystem - The job entry subsystem that MVS uses to do work. - - | **type**: str - | **sample**: STL1 - - cpu_time - Sum of the CPU time used by each job step, in microseconds. - - | **type**: int - | **sample**: 5 - - execution_node - Execution node that picked the job and executed it. - - | **type**: str - | **sample**: STL1 - - origin_node - Origin node that submitted the job. - - | **type**: str - | **sample**: STL1 - - class - Identifies the data set used in a system output data set, usually called a sysout data set. - - | **type**: str - - content_type - Type of address space used by the job, can be one of the following types. - APPC for an APPC Initiator. - JGRP for a JOBGROUP. - JOB for a Batch job. - STC for a Started task. - TSU for a Time sharing user. - \? for an unknown or pending job. - - | **type**: str - | **sample**: JOB - - creation_date - Date, local to the target system, when the job was created. - - | **type**: str - | **sample**: 2023-05-04 - - creation_time - Time, local to the target system, when the job was created. - - | **type**: str - | **sample**: 14:15:00 - - execution_time - Total duration time of the job execution, if it has finished. If the job is still running, it represents the time elapsed from the job execution start and current time. - - | **type**: str - | **sample**: 00:00:10 - - ddnames - Data definition names. - - | **type**: list - | **elements**: dict - - ddname - Data definition name. - - | **type**: str - | **sample**: JESMSGLG - - record_count - Count of the number of lines in a print data set. - - | **type**: int - | **sample**: 17 - - id - The file ID. 
- - | **type**: str - | **sample**: 2 - - stepname - A step name is a name that identifies the job step so that other JCL statements or the operating system can refer to it. - - | **type**: str - | **sample**: JES2 - - procstep - Identifies the set of statements inside JCL grouped together to perform a particular function. - - | **type**: str - | **sample**: PROC1 - - byte_count - Byte size in a print data set. - - | **type**: int - | **sample**: 574 - - content - The ddname content. - - | **type**: list - | **elements**: str - | **sample**: - - .. code-block:: json - - [ - " 1 //HELLO JOB (T043JM,JM00,1,0,0,0),\u0027HELLO WORLD - JRM\u0027,CLASS=R, JOB00134", - " // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM ", - " //* ", - " //* PRINT \"HELLO WORLD\" ON JOB OUTPUT ", - " //* ", - " //* NOTE THAT THE EXCLAMATION POINT IS INVALID EBCDIC FOR JCL ", - " //* AND WILL CAUSE A JCL ERROR ", - " //* ", - " 2 //STEP0001 EXEC PGM=IEBGENER ", - " 3 //SYSIN DD DUMMY ", - " 4 //SYSPRINT DD SYSOUT=* ", - " 5 //SYSUT1 DD * ", - " 6 //SYSUT2 DD SYSOUT=* ", - " 7 // " - ] - - - job_class - Job class for this job. - - | **type**: str - | **sample**: A - - svc_class - Service class for this job. - - | **type**: str - | **sample**: C - - priority - A numeric indicator of the job priority assigned through JES. - - | **type**: int - | **sample**: 4 - - asid - The Address Space Identifier (ASID) that is a unique descriptor for the job address space. Zero if not active. - - | **type**: int - - queue_position - The position within the job queue where the job resides. - - | **type**: int - | **sample**: 3 - - program_name - The name of the program, found in the PGM parameter, of the job's last completed step. - - | **type**: str - | **sample**: IEBGENER - - ret_code - Return code output collected from job log. - - | **type**: dict - | **sample**: - - .. code-block:: json - - { - "ret_code": { - "code": 0, - "msg": "CC 0000", - "msg_code": "0000", - "msg_txt": "", - "steps": [ - { - "step_cc": 0, - "step_name": "STEP0001" - } - ] - } - } - - msg - Return code or abend resulting from the job submission. - - | **type**: str - | **sample**: CC 0000 - - msg_code - Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". - - | **type**: str - | **sample**: S0C4 - - msg_txt - Returns additional information related to the job. - - | **type**: str - | **sample**: No job can be located with this job name: HELLO - - code - Return code converted to integer value (when possible). - - | **type**: int - - steps - Series of JCL steps that were executed and their return codes. - - | **type**: list - | **elements**: dict - - step_name - Name of the step shown as "was executed" in the DD section. - - | **type**: str - | **sample**: STEP0001 - - step_cc - The CC returned for this step in the DD section. - - | **type**: int - - - - -changed - Indicates if any changes were made during module operation. - - | **returned**: on success - | **type**: bool - diff --git a/docs/source/modules/zos_job_query.rst b/docs/source/modules/zos_job_query.rst deleted file mode 100644 index 38cea61e34..0000000000 --- a/docs/source/modules/zos_job_query.rst +++ /dev/null @@ -1,389 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_job_query.py - -.. _zos_job_query_module: - - -zos_job_query -- Query job status ================================= - - - -.. 
contents:: - :local: - :depth: 1 - - -Synopsis -------- -- List z/OS job(s) and the current status of the job(s). -- Uses job_name to filter the jobs by the job name. -- Uses job_id to filter the jobs by the job identifier. -- Uses owner to filter the jobs by the job owner. -- Uses system to filter the jobs by system where the job is running (or ran) on. - - - - - -Parameters ---------- - - -job_name - The job name to query. - - A job name can be up to 8 characters long. - - The *job_name* can include multiple wildcards. - - The asterisk (`*`) wildcard will match zero or more specified characters. - - Note that using this value will query the system for '*' and then return just matching values. - - This may lead to security issues if there are read-access limitations on some users or jobs. - - | **required**: False - | **type**: str - | **default**: * - - -owner - Identifies the owner of the job. - - If no owner is set, the default is 'none' and all jobs will be queried. - - | **required**: False - | **type**: str - - -job_id - The job id that has been assigned to the job. - - A job id must begin with `STC`, `JOB`, `TSU` and is followed by up to 5 digits. - - When a job id is greater than 99,999, the job id format will begin with `S`, `J`, `T` and is followed by 7 digits. - - The *job_id* can include multiple wildcards. - - The asterisk (`*`) wildcard will match zero or more specified characters. - - | **required**: False - | **type**: str - - - - -Attributes ---------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples -------- - -.. code-block:: yaml+jinja - - - - name: Query a job with a job name of 'JOB12345' - zos_job_query: - job_name: "JOB12345" - - - name: Query jobs using a wildcard to match any job id beginning with 'JOB12' - zos_job_query: - job_id: "JOB12*" - - - name: Query jobs using wildcards to match any job name beginning with 'H' and ending in 'O'. - zos_job_query: - job_name: "H*O" - - - name: Query jobs using wildcards to match a range of job id(s) that include 'JOB' and '014'. - zos_job_query: - job_id: JOB*014* - - - name: Query all job names beginning with 'H' that match job id that includes '14'. - zos_job_query: - job_name: "H*" - job_id: "JOB*14*" - - - name: Query all job names beginning with 'LINK' for owner 'ADMIN'. - zos_job_query: - job_name: "LINK*" - owner: ADMIN - - - - - - - - - - -Return Values ------------- - - -changed - True if the state was changed, otherwise False. - - | **returned**: always - | **type**: bool - -jobs - The output information for a list of jobs matching specified criteria. If no job status is found, this will return a ret_code dictionary with parameter msg_txt = The job could not be found. - - | **returned**: success - | **type**: list - | **elements**: dict - | **sample**: - - .. 
code-block:: json - - [ - { - "asid": 0, - "content_type": "JOB", - "cpu_time": 1414, - "creation_date": "2023-05-03", - "creation_time": "12:13:00", - "execution_node": "STL1", - "execution_time": "00:00:02", - "job_class": "K", - "job_id": "JOB01427", - "job_name": "LINKJOB", - "origin_node": "STL1", - "owner": "ADMIN", - "priority": 1, - "queue_position": 3, - "ret_code": "null", - "subsystem": "STL1", - "svc_class": "?", - "system": "STL1" - }, - { - "asid": 4, - "content_type": "JOB", - "cpu_time": 1414, - "creation_date": "2023-05-03", - "creation_time": "12:14:00", - "execution_node": "STL1", - "execution_time": "00:00:03", - "job_class": "A", - "job_id": "JOB16577", - "job_name": "LINKCBL", - "origin_node": "STL1", - "owner": "ADMIN", - "priority": 0, - "queue_position": 0, - "ret_code": { - "code": "null", - "msg": "CANCELED" - }, - "subsystem": "STL1", - "svc_class": "E", - "system": "STL1" - } - ] - - job_name - The name of the batch job. - - | **type**: str - | **sample**: LINKJOB - - owner - The owner who ran the job. - - | **type**: str - | **sample**: ADMIN - - job_id - Unique job identifier assigned to the job by JES. - - | **type**: str - | **sample**: JOB01427 - - content_type - Type of address space used by the job, can be one of the following types. - - APPC for an APPC Initiator. - - JGRP for a JOBGROUP. - - JOB for a Batch job. - - STC for a Started task. - - TSU for a Time sharing user. - - \? for an unknown or pending job. - - | **type**: str - | **sample**: STC - - system - The job entry system that MVS uses to do work. - - | **type**: str - | **sample**: STL1 - - subsystem - The job entry subsystem that MVS uses to do work. - - | **type**: str - | **sample**: STL1 - - cpu_time - Sum of the CPU time used by each job step, in microseconds. - - | **type**: int - | **sample**: 5 - - execution_node - Execution node that picked the job and executed it. - - | **type**: str - | **sample**: STL1 - - origin_node - Origin node that submitted the job. - - | **type**: str - | **sample**: STL1 - - ret_code - Return code output collected from job log. - - | **type**: dict - | **sample**: - - .. code-block:: json - - { - "ret_code": { - "code": 0, - "msg": "CC 0000", - "msg_code": "0000", - "msg_txt": "", - "steps": [ - { - "step_cc": 0, - "step_name": "STEP0001" - } - ] - } - } - - msg - Return code or abend resulting from the job submission. - - | **type**: str - | **sample**: CC 0000 - - msg_code - Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". - - | **type**: str - | **sample**: S0C4 - - msg_txt - Returns additional information related to the job. - - | **type**: str - | **sample**: No job can be located with this job name: HELLO - - code - Return code converted to integer value (when possible). - - | **type**: int - - steps - Series of JCL steps that were executed and their return codes. - - | **type**: list - | **elements**: dict - - step_name - Name of the step shown as "was executed" in the DD section. - - | **type**: str - | **sample**: STEP0001 - - step_cc - The CC returned for this step in the DD section. - - | **type**: int - - - - job_class - Job class for this job. - - | **type**: str - | **sample**: A - - svc_class - Service class for this job. - - | **type**: str - | **sample**: C - - priority - A numeric indicator of the job priority assigned through JES. - - | **type**: int - | **sample**: 4 - - asid - The address Space Identifier (ASID) that is a unique descriptor for the job address space. 
Zero if not active. - - | **type**: int - - creation_date - Date, local to the target system, when the job was created. - - | **type**: str - | **sample**: 2023-05-04 - - creation_time - Time, local to the target system, when the job was created. - - | **type**: str - | **sample**: 14:15:00 - - queue_position - The position within the job queue where the job resides. - - | **type**: int - | **sample**: 3 - - program_name - The name of the program found in the PGM parameter of the job's last completed step. - - | **type**: str - | **sample**: IEBGENER - - execution_time - Total duration time of the job execution, if it has finished. If the job is still running, it represents the time elapsed between the job execution start and the current time. - - | **type**: str - | **sample**: 00:00:10 - - -message - Message returned on failure. - - | **returned**: failure - | **type**: str - | **sample**: {'msg': 'List FAILED! no such job been found: IYK3Z0R9'} - diff --git a/docs/source/modules/zos_job_submit.rst b/docs/source/modules/zos_job_submit.rst deleted file mode 100644 index 6d31b6abd0..0000000000 --- a/docs/source/modules/zos_job_submit.rst +++ /dev/null @@ -1,895 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_job_submit.py - -.. _zos_job_submit_module: - - -zos_job_submit -- Submit JCL ============================ - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis -------- -- Submit JCL in a data set, USS file, or file on the controller. -- Submit a job and monitor for completion. -- For an uncataloged data set, specify the volume serial number. - - - - - -Parameters ---------- - - -src - The source file or data set containing the JCL to submit. - - It could be a physical sequential data set, a partitioned data set qualified by a member or a path (e.g. ``USER.TEST``, ``USER.JCL(TEST)``), or a generation data set from a generation data group (for example, ``USER.TEST.GDG(-2)``). - - Or a USS file (e.g. ``/u/tester/demo/sample.jcl``). - - Or a local file on the Ansible control node (e.g. ``/User/tester/ansible-playbook/sample.jcl``). - - When using a generation data set, only already created generations are valid. If the relative name is positive, or is negative but not found, the module will fail. - - | **required**: True - | **type**: str - - -location - The JCL location. Supported choices are ``data_set``, ``uss`` or ``local``. - - ``data_set`` can be a PDS, PDSE, sequential data set, or a generation data set. - - ``uss`` means the JCL location is located in UNIX System Services (USS). - - ``local`` means the JCL is local to the Ansible control node. - - | **required**: False - | **type**: str - | **default**: data_set - | **choices**: data_set, uss, local - - -wait_time_s - Option *wait_time_s* is the total time that module `zos_job_submit <./zos_job_submit.html>`_ will wait for a submitted job to complete. The time begins when the module is executed on the managed node. - - *wait_time_s* is measured in seconds and must be a value greater than 0 and less than 86400. - - The module can submit and forget jobs by setting *wait_time_s* to 0. This way the module will not try to retrieve the job details other than job id. Job details and contents can be retrieved later by using `zos_job_query <./zos_job_query.html>`_ or `zos_job_output <./zos_job_output.html>`_ if needed. - - | **required**: False - | **type**: int - | **default**: 10
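A submit-and-forget sketch for the *wait_time_s=0* case described above, not one of the module's shipped examples; it assumes the returned ``jobs`` list carries a ``job_id`` entry as documented under Return Values.

.. code-block:: yaml+jinja

   - name: Submit a job without waiting for completion.
     zos_job_submit:
       src: HLQ.DATA.LLQ(SAMPLE)
       location: data_set
       wait_time_s: 0
     register: submit_result

   - name: Retrieve the job details later using the returned job id.
     zos_job_query:
       job_id: "{{ submit_result.jobs[0].job_id }}"

- - -max_rc - Specifies the maximum return code allowed for any job step for the submitted job.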
- - | **required**: False - | **type**: int - - -return_output - Whether to print the DD output. - - If false, an empty list will be returned in the ddnames field. - - | **required**: False - | **type**: bool - | **default**: True - - -volume - The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system. - - When configured, the `zos_job_submit <./zos_job_submit.html>`_ will try to catalog the data set for the volume serial. If it is not able to, the module will fail. - - Ignored for *location=uss* and *location=local*. - - | **required**: False - | **type**: str - - -encoding - Specifies which encoding the local JCL file should be converted from and to, before submitting the job. - - This option is only supported for when *location=local*. - - If this parameter is not provided, and the z/OS systems default encoding can not be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default, otherwise the module will detect the z/OS system encoding. - - | **required**: False - | **type**: dict - - - from - The character set of the local JCL file; defaults to UTF-8. - - Supported character sets rely on the target platform; the most common character sets are supported. - - | **required**: False - | **type**: str - | **default**: UTF-8 - - - to - The character set to convert the local JCL file to on the remote z/OS system; defaults to IBM-1047 when z/OS systems default encoding can not be identified. - - If not provided, the module will attempt to identify and use the default encoding on the z/OS system. - - Supported character sets rely on the target version; the most common character sets are supported. - - | **required**: False - | **type**: str - | **default**: IBM-1047 - - - -use_template - Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. - - Only valid when ``src`` is a local file or directory. - - All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. - - If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ - - | **required**: False - | **type**: bool - | **default**: False - - -template_parameters - Options to set the way Jinja2 will process templates. - - Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. - - These options are ignored unless ``use_template`` is true. - - | **required**: False - | **type**: dict - - - variable_start_string - Marker for the beginning of a statement to print a variable in Jinja2. - - | **required**: False - | **type**: str - | **default**: {{ - - - variable_end_string - Marker for the end of a statement to print a variable in Jinja2. - - | **required**: False - | **type**: str - | **default**: }} - - - block_start_string - Marker for the beginning of a block in Jinja2. - - | **required**: False - | **type**: str - | **default**: {% - - - block_end_string - Marker for the end of a block in Jinja2. - - | **required**: False - | **type**: str - | **default**: %} - - - comment_start_string - Marker for the beginning of a comment in Jinja2. 
- - | **required**: False - | **type**: str - | **default**: {# - - - comment_end_string - Marker for the end of a comment in Jinja2. - - | **required**: False - | **type**: str - | **default**: #} - - - line_statement_prefix - Prefix used by Jinja2 to identify line-based statements. - - | **required**: False - | **type**: str - - - line_comment_prefix - Prefix used by Jinja2 to identify comment lines. - - | **required**: False - | **type**: str - - - lstrip_blocks - Whether Jinja2 should strip leading spaces from the start of a line to a block. - - | **required**: False - | **type**: bool - | **default**: False - - - trim_blocks - Whether Jinja2 should remove the first newline after a block is removed. - - Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. - - | **required**: False - | **type**: bool - | **default**: True - - - keep_trailing_newline - Whether Jinja2 should keep the first trailing newline at the end of a template after rendering. - - | **required**: False - | **type**: bool - | **default**: False - - - newline_sequence - Sequence that starts a newline in a template. - - | **required**: False - | **type**: str - | **default**: \\n - | **choices**: \\n, \\r, \\r\\n - - auto_reload - Whether to reload a template file when it has changed after the task has started. - - | **required**: False - | **type**: bool - | **default**: False - - - autoescape - Whether to enable autoescape of XML/HTML elements on a template. - - | **required**: False - | **type**: bool - | **default**: True - - - - - -Attributes ----------- -action - | **support**: full - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Submit JCL in a PDSE member. - zos_job_submit: - src: HLQ.DATA.LLQ(SAMPLE) - location: data_set - register: response - - - name: Submit JCL in USS with no DDs in the output. - zos_job_submit: - src: /u/tester/demo/sample.jcl - location: uss - return_output: false - - - name: Convert local JCL to IBM-037 and submit the job. - zos_job_submit: - src: /Users/maxy/ansible-playbooks/provision/sample.jcl - location: local - encoding: - from: ISO8859-1 - to: IBM-037 - - - name: Submit JCL in an uncataloged PDSE on volume P2SS01. - zos_job_submit: - src: HLQ.DATA.LLQ(SAMPLE) - location: data_set - volume: P2SS01 - - - name: Submit a long running PDS job and wait up to 30 seconds for completion. - zos_job_submit: - src: HLQ.DATA.LLQ(LONGRUN) - location: data_set - wait_time_s: 30 - - - name: Submit a long running PDS job and wait up to 30 seconds for completion. - zos_job_submit: - src: HLQ.DATA.LLQ(LONGRUN) - location: data_set - wait_time_s: 30 - - - name: Submit JCL and set the max return code the module should fail on to 16. - zos_job_submit: - src: HLQ.DATA.LLQ - location: data_set - max_rc: 16 - - - name: Submit JCL from the latest generation data set in a generation data group. 
- zos_job_submit: - src: HLQ.DATA.GDG(0) - location: data_set - - - name: Submit JCL from a previous generation data set in a generation data group. - zos_job_submit: - src: HLQ.DATA.GDG(-2) - location: data_set - - - - -Notes ------ - -.. note:: - For supported character sets used to encode data, refer to the `documentation `_. - - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - - - - - - - -Return Values -------------- - - -jobs - List of jobs output. If no job status is found, this will return an empty ret_code with msg_txt explanation. - - | **returned**: success - | **type**: list - | **elements**: dict - | **sample**: - - .. code-block:: json - - [ - { - "asid": 0, - "class": "K", - "content_type": "JOB", - "cpu_time": 1, - "creation_date": "2023-05-03", - "creation_time": "12:13:00", - "ddnames": [ - { - "byte_count": "677", - "content": [ - "1 J E S 2 J O B L O G -- S Y S T E M S T L 1 -- N O D E S T L 1 ", - "0 ", - " 12.50.08 JOB00361 ---- FRIDAY, 13 MAR 2020 ----", - " 12.50.08 JOB00361 IRR010I USERID OMVSADM IS ASSIGNED TO THIS JOB.", - " 12.50.08 JOB00361 ICH70001I OMVSADM LAST ACCESS AT 12:50:03 ON FRIDAY, MARCH 13, 2020", - " 12.50.08 JOB00361 $HASP373 DBDGEN00 STARTED - INIT 15 - CLASS K - SYS STL1", - " 12.50.08 JOB00361 SMF000I DBDGEN00 C ASMA90 0000", - " 12.50.09 JOB00361 SMF000I DBDGEN00 L IEWL 0000", - " 12.50.09 JOB00361 $HASP395 DBDGEN00 ENDED - RC=0000", - "0------ JES2 JOB STATISTICS ------", - "- 13 MAR 2020 JOB EXECUTION DATE", - "- 28 CARDS READ", - "- 158 SYSOUT PRINT RECORDS", - "- 0 SYSOUT PUNCH RECORDS", - "- 12 SYSOUT SPOOL KBYTES", - "- 0.00 MINUTES EXECUTION TIME" - ], - "ddname": "JESMSGLG", - "id": "2", - "procstep": "", - "record_count": "16", - "stepname": "JES2" - }, - { - "byte_count": "2136", - "content": [ - " 1 //DBDGEN00 JOB MSGLEVEL=1,MSGCLASS=E,CLASS=K, JOB00361", - " // LINES=999999,TIME=1440,REGION=0M, ", - " // MEMLIMIT=NOLIMIT ", - " 2 /*JOBPARM SYSAFF=* ", - " //* ", - " 3 //DBDGEN PROC MBR=TEMPNAME ", - " //C EXEC PGM=ASMA90, ", - " // PARM=\u0027OBJECT,NODECK,NOLIST\u0027 ", - " //SYSLIB DD DISP=SHR, ", - " // DSN=IMSBLD.I15RTSMM.SDFSMAC ", - " //SYSLIN DD DISP=(NEW,PASS),RECFM=F,LRECL=80,BLKSIZE=80, ", - " // UNIT=SYSDA,SPACE=(CYL,(10,5),RLSE,,) ", - " //SYSUT1 DD DISP=(NEW,DELETE),UNIT=SYSDA,SPACE=(CYL, ", - " // (10,5),,,) ", - " //SYSPRINT DD SYSOUT=* ", - " //L EXEC PGM=IEWL, ", - " // PARM=\u0027XREF,NOLIST\u0027, ", - " // COND=(0,LT,C) ", - " //SYSLMOD DD DISP=SHR, ", - " // DSN=IMSTESTL.IMS1.DBDLIB(\u0026MBR) ", - " //SYSLIN DD DSN=*.C.SYSLIN,DISP=(OLD,DELETE) ", - " //SYSPRINT DD SYSOUT=* ", - " //* ", - " // PEND ", - " 4 //DLORD6 EXEC DBDGEN, ", - " // MBR=DLORD6 ", - " 5 ++DBDGEN PROC MBR=TEMPNAME ", - " 6 ++C EXEC PGM=ASMA90, ", - " ++ PARM=\u0027OBJECT,NODECK,NOLIST\u0027 ", - " 7 ++SYSLIB DD DISP=SHR, ", - " ++ DSN=IMSBLD.I15RTSMM.SDFSMAC ", - " 8 ++SYSLIN DD DISP=(NEW,PASS),RECFM=F,LRECL=80,BLKSIZE=80, ", - " ++ UNIT=SYSDA,SPACE=(CYL,(10,5),RLSE,,) ", - " 9 ++SYSUT1 DD DISP=(NEW,DELETE),UNIT=SYSDA,SPACE=(CYL, ", - " ++ (10,5),,,) ", - " 10 ++SYSPRINT DD SYSOUT=* ", - " 11 //SYSIN DD DISP=SHR, ", - 
" // DSN=IMSTESTL.IMS1.DBDSRC(DLORD6) ", - " 12 ++L EXEC PGM=IEWL, ", - " ++ PARM=\u0027XREF,NOLIST\u0027, ", - " ++ COND=(0,LT,C) ", - " 13 ++SYSLMOD DD DISP=SHR, ", - " ++ DSN=IMSTESTL.IMS1.DBDLIB(\u0026MBR) ", - " IEFC653I SUBSTITUTION JCL - DISP=SHR,DSN=IMSTESTL.IMS1.DBDLIB(DLORD6)", - " 14 ++SYSLIN DD DSN=*.C.SYSLIN,DISP=(OLD,DELETE) ", - " 15 ++SYSPRINT DD SYSOUT=* ", - " ++* " - ], - "ddname": "JESJCL", - "id": "3", - "procstep": "", - "record_count": "47", - "stepname": "JES2" - }, - { - "byte_count": "2414", - "content": [ - " STMT NO. MESSAGE", - " 4 IEFC001I PROCEDURE DBDGEN WAS EXPANDED USING INSTREAM PROCEDURE DEFINITION", - " ICH70001I OMVSADM LAST ACCESS AT 12:50:03 ON FRIDAY, MARCH 13, 2020", - " IEF236I ALLOC. FOR DBDGEN00 C DLORD6", - " IEF237I 083C ALLOCATED TO SYSLIB", - " IGD100I 0940 ALLOCATED TO DDNAME SYSLIN DATACLAS ( )", - " IGD100I 0942 ALLOCATED TO DDNAME SYSUT1 DATACLAS ( )", - " IEF237I JES2 ALLOCATED TO SYSPRINT", - " IEF237I 01A0 ALLOCATED TO SYSIN", - " IEF142I DBDGEN00 C DLORD6 - STEP WAS EXECUTED - COND CODE 0000", - " IEF285I IMSBLD.I15RTSMM.SDFSMAC KEPT ", - " IEF285I VOL SER NOS= IMSBG2. ", - " IEF285I SYS20073.T125008.RA000.DBDGEN00.R0101894 PASSED ", - " IEF285I VOL SER NOS= 000000. ", - " IEF285I SYS20073.T125008.RA000.DBDGEN00.R0101895 DELETED ", - " IEF285I VOL SER NOS= 333333. ", - " IEF285I OMVSADM.DBDGEN00.JOB00361.D0000101.? SYSOUT ", - " IEF285I IMSTESTL.IMS1.DBDSRC KEPT ", - " IEF285I VOL SER NOS= USER03. ", - " IEF373I STEP/C /START 2020073.1250", - " IEF032I STEP/C /STOP 2020073.1250 ", - " CPU: 0 HR 00 MIN 00.03 SEC SRB: 0 HR 00 MIN 00.00 SEC ", - " VIRT: 252K SYS: 240K EXT: 1876480K SYS: 11896K", - " ATB- REAL: 1048K SLOTS: 0K", - " VIRT- ALLOC: 14M SHRD: 0M", - " IEF236I ALLOC. FOR DBDGEN00 L DLORD6", - " IEF237I 01A0 ALLOCATED TO SYSLMOD", - " IEF237I 0940 ALLOCATED TO SYSLIN", - " IEF237I JES2 ALLOCATED TO SYSPRINT", - " IEF142I DBDGEN00 L DLORD6 - STEP WAS EXECUTED - COND CODE 0000", - " IEF285I IMSTESTL.IMS1.DBDLIB KEPT ", - " IEF285I VOL SER NOS= USER03. ", - " IEF285I SYS20073.T125008.RA000.DBDGEN00.R0101894 DELETED ", - " IEF285I VOL SER NOS= 000000. ", - " IEF285I OMVSADM.DBDGEN00.JOB00361.D0000102.? SYSOUT ", - " IEF373I STEP/L /START 2020073.1250", - " IEF032I STEP/L /STOP 2020073.1250 ", - " CPU: 0 HR 00 MIN 00.00 SEC SRB: 0 HR 00 MIN 00.00 SEC ", - " VIRT: 92K SYS: 256K EXT: 1768K SYS: 11740K", - " ATB- REAL: 1036K SLOTS: 0K", - " VIRT- ALLOC: 11M SHRD: 0M", - " IEF375I JOB/DBDGEN00/START 2020073.1250", - " IEF033I JOB/DBDGEN00/STOP 2020073.1250 ", - " CPU: 0 HR 00 MIN 00.03 SEC SRB: 0 HR 00 MIN 00.00 SEC " - ], - "ddname": "JESYSMSG", - "id": "4", - "procstep": "", - "record_count": "44", - "stepname": "JES2" - }, - { - "byte_count": "1896", - "content": [ - "1z/OS V2 R2 BINDER 12:50:08 FRIDAY MARCH 13, 2020 ", - " BATCH EMULATOR JOB(DBDGEN00) STEP(DLORD6 ) PGM= IEWL PROCEDURE(L ) ", - " IEW2278I B352 INVOCATION PARAMETERS - XREF,NOLIST ", - " IEW2650I 5102 MODULE ENTRY NOT PROVIDED. ENTRY DEFAULTS TO SECTION DLORD6. 
", - " ", - " ", - "1 C R O S S - R E F E R E N C E T A B L E ", - " _________________________________________ ", - " ", - " TEXT CLASS = B_TEXT ", - " ", - " --------------- R E F E R E N C E -------------------------- T A R G E T -------------------------------------------", - " CLASS ELEMENT | ELEMENT |", - " OFFSET SECT/PART(ABBREV) OFFSET TYPE | SYMBOL(ABBREV) SECTION (ABBREV) OFFSET CLASS NAME |", - " | |", - " *** E N D O F C R O S S R E F E R E N C E *** ", - "1z/OS V2 R2 BINDER 12:50:08 FRIDAY MARCH 13, 2020 ", - " BATCH EMULATOR JOB(DBDGEN00) STEP(DLORD6 ) PGM= IEWL PROCEDURE(L ) ", - " IEW2850I F920 DLORD6 HAS BEEN SAVED WITH AMODE 24 AND RMODE 24. ENTRY POINT NAME IS DLORD6. ", - " IEW2231I 0481 END OF SAVE PROCESSING. ", - " IEW2008I 0F03 PROCESSING COMPLETED. RETURN CODE = 0. ", - " ", - " ", - " ", - "1---------------------- ", - " MESSAGE SUMMARY REPORT ", - " ---------------------- ", - " TERMINAL MESSAGES (SEVERITY = 16) ", - " NONE ", - " ", - " SEVERE MESSAGES (SEVERITY = 12) ", - " NONE ", - " ", - " ERROR MESSAGES (SEVERITY = 08) ", - " NONE ", - " ", - " WARNING MESSAGES (SEVERITY = 04) ", - " NONE ", - " ", - " INFORMATIONAL MESSAGES (SEVERITY = 00) ", - " 2008 2231 2278 2650 2850 ", - " ", - " ", - " **** END OF MESSAGE SUMMARY REPORT **** ", - " " - ], - "ddname": "SYSPRINT", - "id": "102", - "procstep": "L", - "record_count": "45", - "stepname": "DLORD6" - } - ], - "execution_node": "STL1", - "execution_time": "00:00:10", - "job_class": "K", - "job_id": "JOB00361", - "job_name": "DBDGEN00", - "origin_node": "STL1", - "owner": "OMVSADM", - "priority": 1, - "program_name": "IEBGENER", - "queue_position": 3, - "ret_code": { - "code": 0, - "msg": "CC 0000", - "msg_code": "0000", - "msg_txt": "", - "steps": [ - { - "step_cc": 0, - "step_name": "DLORD6" - } - ] - }, - "subsystem": "STL1", - "svc_class": "?", - "system": "STL1" - } - ] - - job_id - The z/OS job ID of the job containing the spool file. - - | **type**: str - | **sample**: JOB00134 - - job_name - The name of the batch job. - - | **type**: str - | **sample**: HELLO - - content_type - Type of address space used by the job, can be one of the following types. - - APPC for an APPC Initiator. - - JGRP for a JOBGROUP. - - JOB for a Batch job. - - STC for a Started task. - - TSU for a Time sharing user. - - \? for an unknown or pending job. - - | **type**: str - | **sample**: STC - - duration - The total lapsed time the JCL ran for. - - | **type**: int - - execution_time - Total duration time of the job execution, if it has finished. - - | **type**: str - | **sample**: 00:00:10 - - ddnames - Data definition names. - - | **type**: list - | **elements**: dict - - ddname - Data definition name. - - | **type**: str - | **sample**: JESMSGLG - - record_count - Count of the number of lines in a print data set. - - | **type**: int - | **sample**: 17 - - id - The file ID. - - | **type**: str - | **sample**: 2 - - stepname - A step name is name that identifies the job step so that other JCL statements or the operating system can refer to it. - - | **type**: str - | **sample**: JES2 - - procstep - Identifies the set of statements inside JCL grouped together to perform a particular function. - - | **type**: str - | **sample**: PROC1 - - byte_count - Byte size in a print data set. - - | **type**: int - | **sample**: 574 - - content - The ddname content. - - | **type**: list - | **elements**: str - | **sample**: - - .. 
code-block:: json - - [ - " 1 //HELLO JOB (T043JM,JM00,1,0,0,0),\u0027HELLO WORLD - JRM\u0027,CLASS=R, JOB00134", - " // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM ", - " //* ", - " //* PRINT \"HELLO WORLD\" ON JOB OUTPUT ", - " //* ", - " //* NOTE THAT THE EXCLAMATION POINT IS INVALID EBCDIC FOR JCL ", - " //* AND WILL CAUSE A JCL ERROR ", - " //* ", - " 2 //STEP0001 EXEC PGM=IEBGENER ", - " 3 //SYSIN DD DUMMY ", - " 4 //SYSPRINT DD SYSOUT=* ", - " 5 //SYSUT1 DD * ", - " 6 //SYSUT2 DD SYSOUT=* ", - " 7 // " - ] - - - ret_code - Return code output collected from the job log. - - | **type**: dict - | **sample**: - - .. code-block:: json - - { - "ret_code": { - "code": 0, - "msg": "CC 0000", - "msg_code": "0000", - "msg_txt": "", - "steps": [ - { - "step_cc": 0, - "step_name": "STEP0001" - } - ] - } - } - - msg - Job status resulting from the job submission. - - Job status `ABEND` indicates the job ended abnormally. - - Job status `AC` indicates the job is active, often a started task or job taking long. - - Job status `CAB` indicates a converter abend. - - Job status `CANCELED` indicates the job was canceled. - - Job status `CNV` indicates a converter error. - - Job status `FLU` indicates the job was flushed. - - Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error. - - Job status `SEC` or `SEC ERROR` indicates the job as encountered a security error. - - Job status `SYS` indicates a system failure. - - Job status `?` indicates status can not be determined. - - Job status `TYPRUN=SCAN` indicates that the job had the TYPRUN parameter with SCAN option. - - Job status `TYPRUN=COPY` indicates that the job had the TYPRUN parameter with COPY option. - - Job status `HOLD` indicates that the job had the TYPRUN parameter with either the HOLD or JCLHOLD options. - - Jobs where status can not be determined will result in None (NULL). - - | **type**: str - | **sample**: AC - - msg_code - The return code from the submitted job as a string. - - Jobs which have no return code will result in None (NULL), such is the case of a job that errors or is active. - - | **type**: str - - msg_txt - Returns additional information related to the submitted job. - - Jobs which have no additional information will result in None (NULL). - - | **type**: str - | **sample**: The job JOB00551 was run with special job processing TYPRUN=SCAN. This will result in no completion, return code or job steps and changed will be false. - - code - The return code converted to an integer value when available. - - Jobs which have no return code will result in None (NULL), such is the case of a job that errors or is active. - - | **type**: int - - steps - Series of JCL steps that were executed and their return codes. - - | **type**: list - | **elements**: dict - - step_name - Name of the step shown as "was executed" in the DD section. - - | **type**: str - | **sample**: STEP0001 - - step_cc - The CC returned for this step in the DD section. - - | **type**: int - - - - job_class - Job class for this job. - - | **type**: str - | **sample**: A - - svc_class - Service class for this job. - - | **type**: str - | **sample**: C - - priority - A numeric indicator of the job priority assigned through JES. - - | **type**: int - | **sample**: 4 - - asid - The address Space Identifier (ASID) that is a unique descriptor for the job address space. Zero if not active. - - | **type**: int - - creation_date - Date, local to the target system, when the job was created. 
- - | **type**: str - | **sample**: 2023-05-04 - - creation_time - Time, local to the target system, when the job was created. - - | **type**: str - | **sample**: 14:15:00 - - queue_position - The position within the job queue where the job resides. - - | **type**: int - | **sample**: 3 - - program_name - The name of the program found in the PGM parameter of the job's last completed step. - - | **type**: str - | **sample**: IEBGENER - - system - The job entry system that MVS uses to do work. - - | **type**: str - | **sample**: STL1 - - subsystem - The job entry subsystem that MVS uses to do work. - - | **type**: str - | **sample**: STL1 - - cpu_time - Sum of the CPU time used by each job step, in microseconds. - - | **type**: int - | **sample**: 5 - - execution_node - Execution node that picked the job and executed it. - - | **type**: str - | **sample**: STL1 - - origin_node - Origin node that submitted the job. - - | **type**: str - | **sample**: STL1 - - diff --git a/docs/source/modules/zos_lineinfile.rst b/docs/source/modules/zos_lineinfile.rst deleted file mode 100644 index 3ed1a1e339..0000000000 --- a/docs/source/modules/zos_lineinfile.rst +++ /dev/null @@ -1,351 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_lineinfile.py - -.. _zos_lineinfile_module: - - -zos_lineinfile -- Manage textual data on z/OS ============================================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis -------- -- Manage lines in z/OS UNIX System Services (USS) files, PS (sequential data set), PDS, PDSE, or member of a PDS or PDSE. -- This module ensures a particular line is in a USS file or data set, or replaces an existing line using a back-referenced regular expression. -- This is primarily useful when you want to change a single line in a USS file or data set only. - - - - - -Parameters ---------- - - -src - The location can be a UNIX System Services (USS) file, PS (sequential data set), member of a PDS or PDSE, PDS, PDSE. - - The USS file must be an absolute pathname. - - A generation data set (GDS) relative name of a generation already created, e.g. *SOME.CREATION(-1)*. - - | **required**: True - | **type**: str - - -regexp - The regular expression to look for in every line of the USS file or data set. - - For ``state=present``, the pattern to replace if found. Only the last line found will be replaced. - - For ``state=absent``, the pattern of the line(s) to remove. - - If the regular expression is not matched, the line will be added to the USS file or data set in keeping with ``insertbefore`` or ``insertafter`` settings. - - When modifying a line, the regexp should typically match both the initial state of the line as well as its state after replacement by ``line`` to ensure idempotence. - - | **required**: False - | **type**: str - - -state - Whether the line should be inserted/replaced (present) or removed (absent). - - | **required**: False - | **type**: str - | **default**: present - | **choices**: absent, present - - -line - The line to insert/replace into the USS file or data set. - - Required for ``state=present``. - - If ``backrefs`` is set, may contain backreferences that will get expanded with the ``regexp`` capture groups if the regexp matches. - - | **required**: False - | **type**: str
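A short idempotence sketch for the ``regexp`` guidance above; the data set name is hypothetical.

.. code-block:: yaml+jinja

   # '^PORT ' matches the line both before ('PORT 21') and after ('PORT 8080')
   # the edit, so repeated runs make no further changes.
   - name: Ensure the profile port setting is 8080.
     zos_lineinfile:
       src: SYS1.TCPPARMS(PROFILE)
       regexp: '^PORT '
       line: 'PORT 8080'

- - -backrefs - Used with ``state=present``. - - If set, ``line`` can contain backreferences (both positional and named) that will get populated if the ``regexp`` matches.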
- - This parameter changes the operation of the module slightly; ``insertbefore`` and ``insertafter`` will be ignored, and if the ``regexp`` does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged. - - If the ``regexp`` does match, the last matching line will be replaced by the expanded line parameter. - - | **required**: False - | **type**: bool - | **default**: False - - -insertafter - Used with ``state=present``. - - If specified, the line will be inserted after the last match of the specified regular expression. - - If the first match is required, use ``firstmatch=yes``. - - A special value is available; ``EOF`` for inserting the line at the end of the USS file or data set. - - If the specified regular expression has no matches, EOF will be used instead. - - If ``insertbefore`` is set, default value ``EOF`` will be ignored. - - If regular expressions are passed to both ``regexp`` and ``insertafter``, ``insertafter`` is only honored if no match for ``regexp`` is found. - - May not be used with ``backrefs`` or ``insertbefore``. - - Choices are EOF or '*regex*'. - - Default is EOF. - - | **required**: False - | **type**: str - - -insertbefore - Used with ``state=present``. - - If specified, the line will be inserted before the last match of the specified regular expression. - - If the first match is required, use ``firstmatch=yes``. - - A special value is available; ``BOF`` for inserting the line at the beginning of the USS file or data set. - - If the specified regular expression has no matches, the line will be inserted at the end of the USS file or data set. - - If regular expressions are passed to both ``regexp`` and ``insertbefore``, ``insertbefore`` is only honored if no match for ``regexp`` is found. - - May not be used with ``backrefs`` or ``insertafter``. - - Choices are BOF or '*regex*'. - - | **required**: False - | **type**: str - - -backup - Creates a backup file or backup data set for *src*, including the timestamp information to ensure that you retrieve the original file. - - *backup_name* can be used to specify a backup file name if *backup=true*. - - The backup file name will be returned on either success or failure of module execution such that data can be retrieved. - - Use a generation data set (GDS) relative positive name, e.g. SOME.CREATION(+1). - - | **required**: False - | **type**: bool - | **default**: False - - -backup_name - Specify the USS file name or data set name for the destination backup. - - If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name. - - If the source is an MVS data set, the backup_name must be an MVS data set name. - - If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. - - If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. - - | **required**: False - | **type**: str - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup data sets. - - The default HLQ is the Ansible user used to execute the module, and if that is not available, the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str
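A sketch of the *tmp_hlq* override just described; the qualifier ``TMPUSR`` is hypothetical.

.. code-block:: yaml+jinja

   - name: Update a member, allocating temporary and backup data sets under TMPUSR.
     zos_lineinfile:
       src: SOME.PARTITIONED.DATA.SET(DATA)
       regexp: '^VAR='
       line: 'VAR="new value"'
       backup: true
       tmp_hlq: TMPUSR

- - -firstmatch - Used with ``insertafter`` or ``insertbefore``.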
- - If set, ``insertafter`` and ``insertbefore`` will work with the first line that matches the given regular expression. - - | **required**: False - | **type**: bool - | **default**: False - - -encoding - The character set of the source *src*. `zos_lineinfile <./zos_lineinfile.html>`_ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. - - Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. - - | **required**: False - | **type**: str - | **default**: IBM-1047 - - -force - Specifies that the data set can be shared with others during an update which results in the data set you are updating to be simultaneously updated by others. - - This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read. - - The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. - - | **required**: False - | **type**: bool - | **default**: False - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Ensure value of a variable in the sequential data set - zos_lineinfile: - src: SOME.DATA.SET - regexp: '^VAR=' - line: VAR="some value" - - - name: Remove all comments in the USS file - zos_lineinfile: - src: /tmp/src/somefile - state: absent - regexp: '^#' - - - name: Ensure the https port is 8080 - zos_lineinfile: - src: /tmp/src/somefile - regexp: '^Listen ' - insertafter: '^#Listen ' - line: Listen 8080 - - - name: Ensure we have our own comment added to the partitioned data set member - zos_lineinfile: - src: SOME.PARTITIONED.DATA.SET(DATA) - regexp: '#^VAR=' - insertbefore: '^VAR=' - line: '# VAR default value' - - - name: Ensure the user working directory for liberty is set as needed - zos_lineinfile: - src: /tmp/src/somefile - regexp: '^(.*)User(\d+)m(.*)$' - line: '\1APPUser\3' - backrefs: true - - - name: Add a line to a member while a task is in execution - zos_lineinfile: - src: SOME.PARTITIONED.DATA.SET(DATA) - insertafter: EOF - line: 'Should be a working test now' - force: true - - - name: Add a line to a gds - zos_lineinfile: - src: SOME.CREATION(-2) - insertafter: EOF - line: 'Should be a working test now' - - - name: Add a line to dataset and backup in a new generation of gds - zos_lineinfile: - src: SOME.CREATION.TEST - insertafter: EOF - backup: true - backup_name: CREATION.GDS(+1) - line: 'Should be a working test now' - - - - -Notes ------ - -.. note:: - It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. - - All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. 
- - For supported character sets used to encode data, refer to the `documentation `_. - - - - - - - -Return Values ------------- - - -changed - Indicates if the source was modified. A value of 1 represents `true`, otherwise `false`. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - 1 - -found - Number of matching patterns. - - | **returned**: success - | **type**: int - | **sample**: 5 - -cmd - The constructed dsed shell command based on the parameters. - - | **returned**: success - | **type**: str - | **sample**: dsedhelper -d -en IBM-1047 /^PATH=/a\\PATH=/dir/bin:$PATH/$ /etc/profile - -msg - The module messages. - - | **returned**: failure - | **type**: str - | **sample**: Parameter verification failed - -return_content - The error messages from ZOAU dsed. - - | **returned**: failure - | **type**: str - | **sample**: BGYSC1311E Iconv error, cannot open converter from ISO-88955-1 to IBM-1047 - -backup_name - Name of the backup file or data set that was created. - - | **returned**: if backup=true - | **type**: str - | **sample**: /path/to/file.txt.2015-02-03@04:15~ - diff --git a/docs/source/modules/zos_mount.rst b/docs/source/modules/zos_mount.rst deleted file mode 100644 index 703795c3da..0000000000 --- a/docs/source/modules/zos_mount.rst +++ /dev/null @@ -1,628 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_mount.py - -.. _zos_mount_module: - - -zos_mount -- Mount a z/OS file system. ====================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis -------- -- The module `zos_mount <./zos_mount.html>`_ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. -- The *src* data set must be unique and a Fully Qualified Name (FQN). -- The *path* will be created if needed. - - - - - -Parameters ---------- - - -path - The absolute path name onto which the file system is to be mounted. - - The *path* is case sensitive and must be less than or equal to 1023 characters long. - - | **required**: True - | **type**: str - - -src - The name of the file system to be added to the file system hierarchy. - - The file system *src* must be a data set of type *fs_type*. - - The file system *src* data set must be cataloged. - - | **required**: True - | **type**: str - - -fs_type - The type of file system that will be mounted. - - The physical file system's data set format used to perform the logical mount. - - The *fs_type* is required to be lowercase. - - | **required**: True - | **type**: str - | **choices**: hfs, zfs, nfs, tfs - - -state - The desired status of the described mount (choice). - - If *state=mounted* and *src* is not in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will be updated, the device will be mounted and the module will complete successfully with *changed=True*. - - - If *state=mounted* and *src* is in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will not be updated, the device will not be mounted and the module will complete successfully with *changed=False*. - - - If *state=unmounted* and *src* is in use, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will be unmounted and the module will complete successfully with *changed=True*. - - - If *state=unmounted* and *src* is not in use, the module will **not** add the file system entry to parmlib member *persistent/data_store*. The device will remain unchanged and the module will complete with *changed=False*. - - - If *state=present*, the module will add the file system entry to the provided parmlib member *persistent/data_store* if not present. The module will complete successfully with *changed=True*. - - - If *state=absent*, the module will remove the file system entry from the provided parmlib member *persistent/data_store* if present. The module will complete successfully with *changed=True*. - - - If *state=remounted*, the module will **not** add the file system entry to parmlib member *persistent/data_store*. The device will be unmounted and mounted, and the module will complete successfully with *changed=True*. - - - | **required**: False - | **type**: str - | **default**: mounted - | **choices**: absent, mounted, unmounted, present, remounted - - -persistent - Add or remove mount command entries to the provided *data_store*. - - | **required**: False - | **type**: dict - - - data_store - The data set name used for persisting a mount command. This is usually BPXPRMxx or a copy. - - | **required**: True - | **type**: str - - - backup - Creates a backup file or backup data set for *data_store*, including the timestamp information to ensure that you retrieve the original parameters defined in *data_store*. - - *backup_name* can be used to specify a backup file name if *backup=true*. - - The backup file name will be returned on either success or failure of module execution such that data can be retrieved. - - | **required**: False - | **type**: bool - | **default**: False - - - backup_name - Specify the USS file name or data set name for the destination backup. - - If the source *data_store* is a USS file or path, the *backup_name* can be a relative or absolute file or path name. - - If the source is an MVS data set, the backup_name must be an MVS data set name. - - If the backup_name is not provided, the default *backup_name* will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. - - If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. - - | **required**: False - | **type**: str - - - comment - If provided, this is used as a comment that surrounds the command in the *persistent/data_store*. - - Comments are used to encapsulate the *persistent/data_store* entry such that they can easily be understood and located. - - | **required**: False - | **type**: list - | **elements**: str - - - -unmount_opts - Describes how the unmount will be performed. - - For more on unmount options, review the IBM documentation topic **UNMOUNT - Remove a file system from the file hierarchy**. - - | **required**: False - | **type**: str - | **default**: normal - | **choices**: drain, force, immediate, normal, remount, reset - - -mount_opts - Options available to the mount. - - If *mount_opts=ro* on a mounted/remount, mount is performed read-only. - - If *mount_opts=same* and *unmount_opts=remount*, mount is opened in the same mode as previously opened. - - If *mount_opts=nowait*, mount is performed asynchronously. - - If *mount_opts=nosecurity*, security checks are not enforced for files in this file system. - - | **required**: False - | **type**: str - | **default**: rw - | **choices**: ro, rw, same, nowait, nosecurity - - -src_params - Specifies a parameter string to be passed to the file system type. - - The parameter format and content are specified by the file system type. - - | **required**: False - | **type**: str - - -tag_untagged - If present, tags get written to any untagged file. - - When the file system is unmounted, the tags are lost. - - If *tag_untagged=notext*, none of the untagged files in the file system are automatically converted during file reading and writing. - - If *tag_untagged=text*, each untagged file is implicitly marked as containing pure text data that can be converted. - - If this flag is used, use of tag_ccsid is encouraged. - - | **required**: False - | **type**: str - | **choices**: text, notext - - -tag_ccsid - Identifies the coded character set identifier (ccsid) to be implicitly set for the untagged file. - - For more on coded character set identifiers, review the IBM documentation topic **Coded Character Sets**. - - Specified as a decimal value from 0 to 65535. - - The value is not checked as being valid and the corresponding code page is not checked as being installed. - - Required when *tag_untagged=TEXT*. - - | **required**: False - | **type**: int - - -allow_uid - Specifies whether the SETUID and SETGID mode bits on an executable in this file system are considered. Also determines whether the APF extended attribute or the Program Control extended attribute is honored. - - - If *allow_uid=True*, the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. - - - If *allow_uid=False*, the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. - - - | **required**: False - | **type**: bool - | **default**: True - - -sysname - For systems participating in shared file system, *sysname* specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). - - - *sysname* is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. - - - | **required**: False - | **type**: str - - -automove - These parameters apply only in a sysplex where systems are exploiting the shared file system capability. They specify what happens to the ownership of a file system when a shutdown, PFS termination, dead system takeover, or file system move occurs. The default setting is AUTOMOVE where the file system will be randomly moved to another system (no system list used). - - - *automove=automove* indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. - - - *automove=noautomove* prevents movement of the file system's ownership in some situations. - - - *automove=unmount* allows the file system to be unmounted in some situations. - - - | **required**: False - | **type**: str - | **default**: automove - | **choices**: automove, noautomove, unmount
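A placement sketch for the shared file system options above, not one of the module's shipped examples; ``MVSSYS01`` is a hypothetical system name borrowed from the return value samples further below.

.. code-block:: yaml+jinja

   - name: Mount on a specific sysplex member and allow ownership to move on failure.
     zos_mount:
       src: SOMEUSER.VVV.ZFS
       path: /u/omvsadm/core
       fs_type: zfs
       state: mounted
       sysname: MVSSYS01
       automove: automove

- - -automove_list - If *automove=automove*, this option will be checked.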
- - - This specifies the list of servers to include or exclude as destinations. - - - None is a valid value, meaning 'move anywhere'. - - - Indicator is either INCLUDE or EXCLUDE, which can also be abbreviated as I or E. - - - | **required**: False - | **type**: str - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Mount a filesystem. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - - - name: Unmount a filesystem. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: unmounted - unmount_opts: remount - mount_opts: same - - - name: Mount a filesystem readonly. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - mount_opts: ro - - - name: Mount a filesystem and record change in BPXPRMAA. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - persistent: - data_store: SYS1.PARMLIB(BPXPRMAA) - comment: For Tape2 project - - - name: Mount a filesystem and record change in BPXPRMAA after backing up to BPXPRMAB. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - persistent: - data_store: SYS1.PARMLIB(BPXPRMAA) - backup: true - backup_name: SYS1.PARMLIB(BPXPRMAB) - comment: For Tape2 project - - - name: Mount a filesystem ignoring uid/gid values. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - allow_uid: false - - - name: Mount a filesystem asynchronously (don't wait for completion). - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - mount_opts: nowait - - - name: Mount a filesystem with no security checks. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - mount_opts: nosecurity - - - name: Mount a filesystem, limiting automove to 4 devices. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - automove: automove - automove_list: I,DEV1,DEV2,DEV3,DEV9 - - - name: Mount a filesystem, limiting automove to all except 4 devices. - zos_mount: - src: SOMEUSER.VVV.ZFS - path: /u/omvsadm/core - fs_type: zfs - state: mounted - automove: automove - automove_list: EXCLUDE,DEV4,DEV5,DEV6,DEV7 - - - - -Notes ------ - -.. note:: - All data sets are always assumed to be cataloged. - - If an uncataloged data set needs to be fetched, it should be cataloged first. - - Uncataloged data sets can be cataloged using the `zos_data_set <./zos_data_set.html>`_ module. - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_data_set_module` - - - - -Return Values -------------- - - -path - The absolute path name onto which the file system is to be mounted. 
- - | **returned**: always - | **type**: str - | **sample**: /u/omvsadm/core - -src - The file in z/OS that is to be mounted. - - | **returned**: always - | **type**: str - | **sample**: SOMEUSER.VVV.ZFS - -fs_type - The type of file system that will perform the logical mount request. - - | **returned**: always - | **type**: str - | **sample**: ZFS - -state - The desired status of the described mount. - - | **returned**: always - | **type**: str - | **sample**: mounted - -persistent - Values the user provided as input. - - | **returned**: always - | **type**: dict - - data_store - The persistent store name where the mount was written to. - - | **returned**: always - | **type**: str - | **sample**: SYS1.FILESYS(BPXPRMAA) - - backup - Indicates if a backup of destinattion was configured. - - | **returned**: always - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - backup_name - The unique data set name for the destination backup. - - | **returned**: always - | **type**: str - | **sample**: SYS1.FILESYS(PRMAABAK) - - comment - The text that was used in markers around the *Persistent/data_store* entry. - - | **returned**: always - | **type**: list - | **sample**: - - .. code-block:: json - - [ - [ - "u\u0027I did this because..\u0027" - ] - ] - - -unmount_opts - Describes how the unmount is to be performed. - - | **returned**: changed and if state=unmounted - | **type**: str - | **sample**: drain - -mount_opts - Options available to the mount. - - | **returned**: whenever non-None - | **type**: str - | **sample**: rw,nosecurity - -src_params - Specifies a parameter string to be passed to the file system type. - - | **returned**: whenever non-None - | **type**: str - | **sample**: D(101) - -tag_untagged - Indicates if tags should be written to untagged files. - - | **returned**: whenever Non-None - | **type**: str - | **sample**: TEXT - -tag_ccsid - CCSID for untagged files in the mounted file system. - - | **returned**: when tag_untagged is defined - | **type**: int - | **sample**: 819 - -allow_uid - Whether the SETUID and SETGID mode bits on executables in this file system are considered. - - | **returned**: always - | **type**: bool - | **sample**: - - .. code-block:: json - - true - -sysname - *sysname* specifies the particular system on which a mount should be performed. - - | **returned**: if Non-None - | **type**: str - | **sample**: MVSSYS01 - -automove - Specifies what happens to the ownership of a file system during a shutdown, PFS termination, dead system takeover, or when file system move occurs. - - - | **returned**: if Non-None - | **type**: str - | **sample**: automove - -automove_list - This specifies the list of servers to include or exclude as destinations. - - | **returned**: if Non-None - | **type**: str - | **sample**: I,SERV01,SERV02,SERV03,SERV04 - -msg - Failure message returned by the module. - - | **returned**: failure - | **type**: str - | **sample**: Error while gathering information - -stdout - The stdout from the mount command. - - | **returned**: always - | **type**: str - | **sample**: MOUNT FILESYSTEM( 'source-dataset' ) MOUNTPOINT( '/uss-path' ) TYPE( ZFS ) - -stderr - The stderr from the mount command. - - | **returned**: failure - | **type**: str - | **sample**: No such file or directory "/tmp/foo" - -stdout_lines - List of strings containing individual lines from stdout. - - | **returned**: failure - | **type**: list - | **sample**: - - .. 
code-block:: json - - [ - "u\"MOUNT FILESYSTEM( \u0027source-dataset\u0027 ) MOUNTPOINT( \u0027/uss-path\u0027 ) TYPE( ZFS )\"" - ] - -stderr_lines - List of strings containing individual lines from stderr. - - | **returned**: failure - | **type**: list - | **sample**: - - .. code-block:: json - - [ - { - "u\"FileNotFoundError": "No such file or directory \u0027/tmp/foo\u0027\"" - } - ] - -cmd - The actual command that was run by the module. - - | **returned**: failure - | **type**: str - | **sample**: MOUNT FILESYSTEM( 'EXAMPLE.DATA.SET' ) MOUNTPOINT( '/u/omvsadm/sample' ) TYPE( ZFS ) - -rc - The return code of the mount command, if applicable. - - | **returned**: failure - | **type**: int - | **sample**: 8 - diff --git a/docs/source/modules/zos_mvs_raw.rst b/docs/source/modules/zos_mvs_raw.rst deleted file mode 100644 index 4d0abec6f2..0000000000 --- a/docs/source/modules/zos_mvs_raw.rst +++ /dev/null @@ -1,2083 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_mvs_raw.py - -.. _zos_mvs_raw_module: - - -zos_mvs_raw -- Run a z/OS program. -================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Run a z/OS program. -- This is analogous to a job step in JCL. -- Defaults will be determined by underlying API if value not provided. - - - - - -Parameters ----------- - - -program_name - The name of the z/OS program to run (e.g. IDCAMS, IEFBR14, IEBGENER etc.). - - | **required**: True - | **type**: str - - -parm - The program arguments (e.g. -a='MARGINS(1,72)'). - - | **required**: False - | **type**: str - - -auth - Determines whether this program should run with authorized privileges. - - If *auth=true*, the program runs as APF authorized. - - If *auth=false*, the program runs as unauthorized. - - | **required**: False - | **type**: bool - | **default**: False - - -verbose - Determines if verbose output should be returned from the underlying utility used by this module. - - When *verbose=true* verbose output is returned on module failure. - - | **required**: False - | **type**: bool - | **default**: False - - -max_rc - Specifies the maximum return code allowed for the program output. If the program generates a return code higher than the specified maximum, the module will fail. - - | **required**: False - | **type**: int - | **default**: 0 - - -dds - The input data source. - - *dds* supports 6 types of sources - - 1. *dd_data_set* for data set files. - - 2. *dd_unix* for UNIX files. - - 3. *dd_input* for in-stream data set. - - 4. *dd_dummy* for no content input. - - 5. *dd_concat* for a data set concatenation. - - 6. *dds* supports any combination of source types. - - | **required**: False - | **type**: list - | **elements**: dict - - - dd_data_set - Specify a data set. - - *dd_data_set* can reference an existing data set or be used to define a new data set to be created during execution. - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - data_set_name - The data set name. - - A data set name can be a GDS relative name. - - When using GDS relative name and it is a positive generation, *disposition=new* must be used. - - | **required**: False - | **type**: str - - - raw - Create a new data set and let the MVS program assign its own default DCB attributes. - - When ``raw=true``, all supplied DCB attributes like disposition, space, volumes, SMS, keys, record settings, etc. are ignored. 
- - Using ``raw`` option is not possible for all programs, use this for cases where the MVS program that is called is able to assign its own default dataset attributes. - - | **required**: False - | **type**: bool - | **default**: False - - - type - The data set type. Only required when *disposition=new*. - - Maps to DSNTYPE on z/OS. - - | **required**: False - | **type**: str - | **choices**: library, pds, pdse, large, basic, seq, rrds, esds, lds, ksds - - - disposition - *disposition* indicates the status of a data set. - - Defaults to shr. - - | **required**: False - | **type**: str - | **choices**: new, shr, mod, old - - - disposition_normal - *disposition_normal* indicates what to do with the data set after a normal termination of the program. - - | **required**: False - | **type**: str - | **choices**: delete, keep, catalog, uncatalog - - - disposition_abnormal - *disposition_abnormal* indicates what to do with the data set after an abnormal termination of the program. - - | **required**: False - | **type**: str - | **choices**: delete, keep, catalog, uncatalog - - - reuse - Determines if a data set should be reused if *disposition=new* and if a data set with a matching name already exists. - - If *reuse=true*, *disposition* will be automatically switched to ``SHR``. - - If *reuse=false*, and a data set with a matching name already exists, allocation will fail. - - Mutually exclusive with *replace*. - - *reuse* is only considered when *disposition=new* - - | **required**: False - | **type**: bool - | **default**: False - - - replace - Determines if a data set should be replaced if *disposition=new* and a data set with a matching name already exists. - - If *replace=true*, the original data set will be deleted, and a new data set created. - - If *replace=false*, and a data set with a matching name already exists, allocation will fail. - - Mutually exclusive with *reuse*. - - *replace* is only considered when *disposition=new* - - *replace* will result in loss of all data in the original data set unless *backup* is specified. - - | **required**: False - | **type**: bool - | **default**: False - - - backup - Determines if a backup should be made of an existing data set when *disposition=new*, *replace=true*, and a data set with the desired name is found. - - *backup* is only used when *replace=true*. - - | **required**: False - | **type**: bool - | **default**: False - - - space_type - The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. - - | **required**: False - | **type**: str - | **choices**: trk, cyl, b, k, m, g - - - space_primary - The primary amount of space to allocate for a new data set. - - The value provided to *space_type* is used as the unit of space for the allocation. - - Not applicable when *space_type=blklgth* or *space_type=reclgth*. - - | **required**: False - | **type**: int - - - space_secondary - When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - - The value provided to *space_type* is used as the unit of space for the allocation. - - Not applicable when *space_type=blklgth* or *space_type=reclgth*. - - | **required**: False - | **type**: int - - - volumes - The volume or volumes on which a data set resides or will reside. - - Do not specify the same volume multiple times. - - | **required**: False - | **type**: raw - - - sms_management_class - The desired management class for a new SMS-managed data set. 
- - *sms_management_class* is ignored if specified for an existing data set. - - All values must be between 1-8 alpha-numeric characters. - - | **required**: False - | **type**: str - - - sms_storage_class - The desired storage class for a new SMS-managed data set. - - *sms_storage_class* is ignored if specified for an existing data set. - - All values must be between 1-8 alpha-numeric characters. - - | **required**: False - | **type**: str - - - sms_data_class - The desired data class for a new SMS-managed data set. - - *sms_data_class* is ignored if specified for an existing data set. - - All values must be between 1-8 alpha-numeric characters. - - | **required**: False - | **type**: str - - - block_size - The maximum length of a block in bytes. - - Default is dependent on *record_format* - - | **required**: False - | **type**: int - - - directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - - key_label - The label for the encryption key used by the system to encrypt the data set. - - *key_label* is the public name of a protected encryption key in the ICSF key repository. - - *key_label* should only be provided when creating an extended format data set. - - Maps to DSKEYLBL on z/OS. - - | **required**: False - | **type**: str - - - encryption_key_1 - The encrypting key used by the Encryption Key Manager. - - Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. - - | **required**: False - | **type**: dict - - - label - The label for the key encrypting key used by the Encryption Key Manager. - - Key label must have a private key associated with it. - - *label* can be a maximum of 64 characters. - - Maps to KEYLAB1 on z/OS. - - | **required**: True - | **type**: str - - - encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. - - *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. - - Maps to KEYCD1 on z/OS. - - | **required**: True - | **type**: str - | **choices**: l, h - - - - encryption_key_2 - The encrypting key used by the Encryption Key Manager. - - Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. - - | **required**: False - | **type**: dict - - - label - The label for the key encrypting key used by the Encryption Key Manager. - - Key label must have a private key associated with it. - - *label* can be a maximum of 64 characters. - - Maps to KEYLAB2 on z/OS. - - | **required**: True - | **type**: str - - - encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. - - *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. - - Maps to KEYCD2 on z/OS. - - | **required**: True - | **type**: str - | **choices**: l, h - - - - key_length - The length of the keys used in a new data set. - - If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. - - Valid values are (0-255 non-vsam), (1-255 vsam). - - | **required**: False - | **type**: int - - - key_offset - The position of the first byte of the record key in each logical record of a new VSAM data set. - - The first byte of a logical record is position 0. - - Provide *key_offset* only for VSAM key-sequenced data sets. 
- - | **required**: False - | **type**: int - - record_length - The logical record length. (e.g ``80``). - - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - - Valid values are (1-32760 for non-VSAM, 1-32761 for VSAM). - - Maps to LRECL on z/OS. - - | **required**: False - | **type**: int - - record_format - The format and characteristics of the records for a new data set. - - | **required**: False - | **type**: str - | **choices**: u, vb, vba, fb, fba - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. - - | **required**: False - | **type**: dict - - type - The type of the content to be returned. - - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. - - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - src_encoding - The encoding of the data set on the z/OS system. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - response_encoding - The encoding to use when returning the contents of the data set. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - dd_unix - The path to a file in UNIX System Services (USS). - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - path - The path to an existing UNIX file. - - Or provide the path to a newly created UNIX file when *status_group=ocreat*. - - The provided path must be absolute. - - | **required**: True - | **type**: str - - - disposition_normal - Indicates what to do with the UNIX file after normal termination of the program. - - | **required**: False - | **type**: str - | **choices**: keep, delete - - - disposition_abnormal - Indicates what to do with the UNIX file after abnormal termination of the program. - - | **required**: False - | **type**: str - | **choices**: keep, delete - - - mode - The file access attributes to use when creating the UNIX file specified in *path*. - - Specify the mode as an octal number, similar to chmod. - - Maps to PATHMODE on z/OS. - - | **required**: False - | **type**: int - - - status_group - The status for the UNIX file specified in *path*. - - If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. - - Maps to PATHOPTS status group file options on z/OS. - - You can specify up to 6 choices. - - *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. - - *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. - - *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified.
- - *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. - - *ononblock* specifies the following, depending on the type of file - - For a FIFO special file - - 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. - - 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. - - 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. - - 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. - - 5. For a character special file that supports nonblocking open - - 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - - 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. - - *ononblock* has no effect on other file types. - - *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - - *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. - - When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. - - | **required**: False - | **type**: list - | **elements**: str - | **choices**: oappend, ocreat, oexcl, onoctty, ononblock, osync, otrunc - - - access_group - The kind of access to request for the UNIX file specified in *path*. - - | **required**: False - | **type**: str - | **choices**: r, w, rw, read_only, write_only, read_write, ordonly, owronly, ordwr - - - file_data_type - The type of data that is (or will be) stored in the file specified in *path*. - - Maps to FILEDATA on z/OS. - - | **required**: False - | **type**: str - | **default**: binary - | **choices**: binary, text, record - - - block_size - The block size, in bytes, for the UNIX file. - - Default is dependent on *record_format* - - | **required**: False - | **type**: int - - - record_length - The logical record length for the UNIX file. - - *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. - - Maps to LRECL on z/OS. - - | **required**: False - | **type**: int - - - record_format - The record format for the UNIX file. - - *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. - - | **required**: False - | **type**: str - | **choices**: u, vb, vba, fb, fba - - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. - - | **required**: False - | **type**: dict - - - type - The type of the content to be returned. 
- - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. - - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - - src_encoding - The encoding of the file on the z/OS system. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - - response_encoding - The encoding to use when returning the contents of the file. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - dd_input - *dd_input* is used to specify an in-stream data set. - - Input will be saved to a temporary data set with a record length of 80. - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - content - The input contents for the DD. - - *dd_input* supports single or multiple lines of input. - - Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. - - If a list of strings is provided, newlines will be added to each of the lines when used as input. - - If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. - - When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all *content* types; string, list of strings and when using a YAML block indicator. - - | **required**: True - | **type**: raw - - - reserved_cols - Determines how many columns at the beginning of the content are reserved with empty spaces. - - | **required**: False - | **type**: int - | **default**: 2 - - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. - - | **required**: False - | **type**: dict - - - type - The type of the content to be returned. - - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. - - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - - src_encoding - The encoding of the data set on the z/OS system. - - for *dd_input*, *src_encoding* should generally not need to be changed. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - - response_encoding - The encoding to use when returning the contents of the data set. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - dd_output - Use *dd_output* to specify - Content sent to the DD should be returned to the user. - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. 
- - | **required**: True - | **type**: dict - - - type - The type of the content to be returned. - - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. - - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - - src_encoding - The encoding of the data set on the z/OS system. - - for *dd_input*, *src_encoding* should generally not need to be changed. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - - response_encoding - The encoding to use when returning the contents of the data set. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - dd_dummy - Use *dd_dummy* to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set. - - *dd_dummy* accepts no content input. - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - - dd_vio - *dd_vio* is used to handle temporary data sets. - - VIO data sets reside in the paging space; but, to the problem program and the access method, the data sets appear to reside on a direct access storage device. - - You cannot use VIO for permanent data sets, VSAM data sets, or partitioned data sets extended (PDSEs). - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - - dd_volume - Use *dd_volume* to specify the volume to use in the DD statement. - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - volume_name - The volume serial number. - - | **required**: True - | **type**: str - - - unit - Device type for the volume. - - This option is case sensitive. - - | **required**: True - | **type**: str - - - disposition - *disposition* indicates the status of a data set. - - | **required**: True - | **type**: str - | **choices**: new, shr, mod, old - - - - dd_concat - *dd_concat* is used to specify a data set concatenation. - - | **required**: False - | **type**: dict - - - dd_name - The DD name. - - | **required**: True - | **type**: str - - - dds - A list of DD statements, which can contain any of the following types: *dd_data_set*, *dd_unix*, and *dd_input*. - - | **required**: False - | **type**: list - | **elements**: dict - - - dd_data_set - Specify a data set. - - *dd_data_set* can reference an existing data set. The data set referenced with ``data_set_name`` must be allocated before the module `zos_mvs_raw <./zos_mvs_raw.html>`_ is run, you can use `zos_data_set <./zos_data_set.html>`_ to allocate a data set. - - | **required**: False - | **type**: dict - - - data_set_name - The data set name. - - A data set name can be a GDS relative name. - - When using GDS relative name and it is a positive generation, *disposition=new* must be used. - - | **required**: False - | **type**: str - - - raw - Create a new data set and let the MVS program assign its own default DCB attributes. - - When ``raw=true``, all supplied DCB attributes like disposition, space, volumes, SMS, keys, record settings, etc. are ignored. - - Using ``raw`` option is not possible for all programs, use this for cases where the MVS program that is called is able to assign its own default dataset attributes. 
- - | **required**: False - | **type**: bool - | **default**: False - - - type - The data set type. Only required when *disposition=new*. - - Maps to DSNTYPE on z/OS. - - | **required**: False - | **type**: str - | **choices**: library, pds, pdse, large, basic, seq, rrds, esds, lds, ksds - - - disposition - *disposition* indicates the status of a data set. - - Defaults to shr. - - | **required**: False - | **type**: str - | **choices**: new, shr, mod, old - - - disposition_normal - *disposition_normal* indicates what to do with the data set after normal termination of the program. - - | **required**: False - | **type**: str - | **choices**: delete, keep, catalog, uncatalog - - - disposition_abnormal - *disposition_abnormal* indicates what to do with the data set after abnormal termination of the program. - - | **required**: False - | **type**: str - | **choices**: delete, keep, catalog, uncatalog - - - reuse - Determines if data set should be reused if *disposition=new* and a data set with matching name already exists. - - If *reuse=true*, *disposition* will be automatically switched to ``SHR``. - - If *reuse=false*, and a data set with a matching name already exists, allocation will fail. - - Mutually exclusive with *replace*. - - *reuse* is only considered when *disposition=new* - - | **required**: False - | **type**: bool - | **default**: False - - - replace - Determines if data set should be replaced if *disposition=new* and a data set with matching name already exists. - - If *replace=true*, the original data set will be deleted, and a new data set created. - - If *replace=false*, and a data set with a matching name already exists, allocation will fail. - - Mutually exclusive with *reuse*. - - *replace* is only considered when *disposition=new* - - *replace* will result in loss of all data in the original data set unless *backup* is specified. - - | **required**: False - | **type**: bool - | **default**: False - - - backup - Determines if a backup should be made of existing data set when *disposition=new*, *replace=true*, and a data set with the desired name is found. - - *backup* is only used when *replace=true*. - - | **required**: False - | **type**: bool - | **default**: False - - - space_type - The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. - - | **required**: False - | **type**: str - | **choices**: trk, cyl, b, k, m, g - - - space_primary - The primary amount of space to allocate for a new data set. - - The value provided to *space_type* is used as the unit of space for the allocation. - - Not applicable when *space_type=blklgth* or *space_type=reclgth*. - - | **required**: False - | **type**: int - - - space_secondary - When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. - - The value provided to *space_type* is used as the unit of space for the allocation. - - Not applicable when *space_type=blklgth* or *space_type=reclgth*. - - | **required**: False - | **type**: int - - - volumes - The volume or volumes on which a data set resides or will reside. - - Do not specify the same volume multiple times. - - | **required**: False - | **type**: raw - - - sms_management_class - The desired management class for a new SMS-managed data set. - - *sms_management_class* is ignored if specified for an existing data set. - - All values must be between 1-8 alpha-numeric characters. 
- - | **required**: False - | **type**: str - - - sms_storage_class - The desired storage class for a new SMS-managed data set. - - *sms_storage_class* is ignored if specified for an existing data set. - - All values must be between 1-8 alpha-numeric characters. - - | **required**: False - | **type**: str - - - sms_data_class - The desired data class for a new SMS-managed data set. - - *sms_data_class* is ignored if specified for an existing data set. - - All values must be between 1-8 alpha-numeric characters. - - | **required**: False - | **type**: str - - - block_size - The maximum length of a block in bytes. - - Default is dependent on *record_format* - - | **required**: False - | **type**: int - - - directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - - key_label - The label for the encryption key used by the system to encrypt the data set. - - *key_label* is the public name of a protected encryption key in the ICSF key repository. - - *key_label* should only be provided when creating an extended format data set. - - Maps to DSKEYLBL on z/OS. - - | **required**: False - | **type**: str - - - encryption_key_1 - The encrypting key used by the Encryption Key Manager. - - Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. - - | **required**: False - | **type**: dict - - - label - The label for the key encrypting key used by the Encryption Key Manager. - - Key label must have a private key associated with it. - - *label* can be a maximum of 64 characters. - - Maps to KEYLAB1 on z/OS. - - | **required**: True - | **type**: str - - - encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. - - *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. - - Maps to KEYCD1 on z/OS. - - | **required**: True - | **type**: str - | **choices**: l, h - - - - encryption_key_2 - The encrypting key used by the Encryption Key Manager. - - Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. - - | **required**: False - | **type**: dict - - - label - The label for the key encrypting key used by the Encryption Key Manager. - - Key label must have a private key associated with it. - - *label* can be a maximum of 64 characters. - - Maps to KEYLAB2 on z/OS. - - | **required**: True - | **type**: str - - - encoding - How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. - - *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. - - Maps to KEYCD2 on z/OS. - - | **required**: True - | **type**: str - | **choices**: l, h - - - - key_length - The length of the keys used in a new data set. - - If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. - - Valid values are (0-255 non-vsam), (1-255 vsam). - - | **required**: False - | **type**: int - - - key_offset - The position of the first byte of the record key in each logical record of a new VSAM data set. - - The first byte of a logical record is position 0. - - Provide *key_offset* only for VSAM key-sequenced data sets. - - | **required**: False - | **type**: int - - - record_length - The logical record length. (e.g ``80``). 
- - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - - Valid values are (1-32760 for non-vsam, 1-32761 for vsam). - - Maps to LRECL on z/OS. - - | **required**: False - | **type**: int - - record_format - The format and characteristics of the records for a new data set. - - | **required**: False - | **type**: str - | **choices**: u, vb, vba, fb, fba - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. - - | **required**: False - | **type**: dict - - type - The type of the content to be returned. - - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. - - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - src_encoding - The encoding of the data set on the z/OS system. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - response_encoding - The encoding to use when returning the contents of the data set. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - dd_unix - The path to a file in UNIX System Services (USS). - - | **required**: False - | **type**: dict - - - path - The path to an existing UNIX file. - - Or provide the path to a newly created UNIX file when *status_group=ocreat*. - - The provided path must be absolute. - - | **required**: True - | **type**: str - - - disposition_normal - Indicates what to do with the UNIX file after normal termination of the program. - - | **required**: False - | **type**: str - | **choices**: keep, delete - - - disposition_abnormal - Indicates what to do with the UNIX file after abnormal termination of the program. - - | **required**: False - | **type**: str - | **choices**: keep, delete - - - mode - The file access attributes to use when creating the UNIX file specified in *path*. - - Specify the mode as an octal number, similar to chmod. - - Maps to PATHMODE on z/OS. - - | **required**: False - | **type**: int - - - status_group - The status for the UNIX file specified in *path*. - - If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. - - Maps to PATHOPTS status group file options on z/OS. - - You can specify up to 6 choices. - - *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. - - *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. - - *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. - - *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process.
- - *ononblock* specifies the following, depending on the type of file - - For a FIFO special file - - 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. - - 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. - - 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. - - 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. - - 5. For a character special file that supports nonblocking open - - 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. - - 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. - - *ononblock* has no effect on other file types. - - *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. - - *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. - - When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. - - | **required**: False - | **type**: list - | **elements**: str - | **choices**: oappend, ocreat, oexcl, onoctty, ononblock, osync, otrunc - - - access_group - The kind of access to request for the UNIX file specified in *path*. - - | **required**: False - | **type**: str - | **choices**: r, w, rw, read_only, write_only, read_write, ordonly, owronly, ordwr - - - file_data_type - The type of data that is (or will be) stored in the file specified in *path*. - - Maps to FILEDATA on z/OS. - - | **required**: False - | **type**: str - | **default**: binary - | **choices**: binary, text, record - - - block_size - The block size, in bytes, for the UNIX file. - - Default is dependent on *record_format* - - | **required**: False - | **type**: int - - - record_length - The logical record length for the UNIX file. - - *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. - - Maps to LRECL on z/OS. - - | **required**: False - | **type**: int - - - record_format - The record format for the UNIX file. - - *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. - - | **required**: False - | **type**: str - | **choices**: u, vb, vba, fb, fba - - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. - - | **required**: False - | **type**: dict - - - type - The type of the content to be returned. - - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. 
- - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - - src_encoding - The encoding of the file on the z/OS system. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - - response_encoding - The encoding to use when returning the contents of the file. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - dd_input - *dd_input* is used to specify an in-stream data set. - - Input will be saved to a temporary data set with a record length of 80. - - | **required**: False - | **type**: dict - - - content - The input contents for the DD. - - *dd_input* supports single or multiple lines of input. - - Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. - - If a list of strings is provided, newlines will be added to each of the lines when used as input. - - If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. - - When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all *content* types; string, list of strings and when using a YAML block indicator. - - | **required**: True - | **type**: raw - - - reserved_cols - Determines how many columns at the beginning of the content are reserved with empty spaces. - - | **required**: False - | **type**: int - | **default**: 2 - - - return_content - Determines how content should be returned to the user. - - If not provided, no content from the DD is returned. - - | **required**: False - | **type**: dict - - - type - The type of the content to be returned. - - ``text`` means return content in encoding specified by *response_encoding*. - - *src_encoding* and *response_encoding* are only used when *type=text*. - - ``base64`` means return content as base64 encoded in binary. - - | **required**: True - | **type**: str - | **choices**: text, base64 - - - src_encoding - The encoding of the data set on the z/OS system. - - for *dd_input*, *src_encoding* should generally not need to be changed. - - | **required**: False - | **type**: str - | **default**: ibm-1047 - - - response_encoding - The encoding to use when returning the contents of the data set. - - | **required**: False - | **type**: str - | **default**: iso8859-1 - - - - - - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. 
-check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: List data sets matching pattern in catalog, - save output to a new sequential data set and return output as text. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: mypgm.output.ds - disposition: new - reuse: true - type: seq - space_primary: 5 - space_secondary: 1 - space_type: m - volumes: - - "000000" - record_format: fb - return_content: - type: text - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: Run ADRDSSU to dump a data set without having to specify the DCB attributes for dd_data_set by using the raw option. - zos_mvs_raw: - program_name: ADRDSSU - auth: true - verbose: true - dds: - - dd_data_set: - dd_name: OUTDD - data_set_name: "USER.TEST.DUMP" - raw: true - - dd_input: - dd_name: SYSIN - content: | - DUMP DATASET(INCLUDE(USER.TEST.SOURCE)) - - OUTDDNAME(OUTDD) - - dd_output: - dd_name: SYSPRINT - return_content: - type: text - - - name: Full volume dump using ADRDSSU. - zos_mvs_raw: - program_name: adrdssu - auth: true - dds: - - dd_data_set: - dd_name: dumpdd - data_set_name: mypgm.output.ds - disposition: new - disposition_normal: catalog - disposition_abnormal: delete - space_type: cyl - space_primary: 10 - space_secondary: 10 - record_format: u - record_length: 0 - block_size: 32760 - type: seq - - dd_volume: - dd_name: voldd - volume_name: "000000" - unit: "3390" - disposition: old - - dd_input: - dd_name: sysin - content: " VOLDUMP VOL(voldd) DSNAME(dumpdd) FULL" - - dd_output: - dd_name: sysprint - return_content: - type: text - - - name: List data sets matching patterns in catalog, - save output to a new sequential data set and return output as text. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: mypgm.output.ds - disposition: new - reuse: true - type: seq - space_primary: 5 - space_secondary: 1 - space_type: m - volumes: - - "000000" - record_format: fb - return_content: - type: text - - dd_input: - dd_name: sysin - content: - - LISTCAT ENTRIES('SOME.DATASET.*') - - LISTCAT ENTRIES('SOME.OTHER.DS.*') - - LISTCAT ENTRIES('YET.ANOTHER.DS.*') - - - name: List data sets matching pattern in catalog, - save output to an existing sequential data set and - return output as text. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: mypgm.output.ds - disposition: shr - return_content: - type: text - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: List data sets matching pattern in catalog, - save output to a sequential data set. If the data set exists, - then reuse it; if it does not exist, create it. Returns output as text. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: mypgm.output.ds - disposition: new - reuse: true - type: seq - space_primary: 5 - space_secondary: 1 - space_type: m - volumes: - - "000000" - record_format: fb - return_content: - type: text - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: List data sets matching pattern in catalog, - save output to a sequential data set. If the data set exists, - then back up the existing data set and replace it. - If the data set does not exist, create it. - Returns backup name (if a backup was made) and output as text. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: mypgm.output.ds - disposition: new - replace: true - backup: true - type: seq - space_primary: 5 - space_secondary: 1 - space_type: m - volumes: - - "000000" - - "111111" - - "SCR002" - record_format: fb - return_content: - type: text - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: List data sets matching pattern in catalog, - save output to a file in UNIX System Services. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_unix: - dd_name: sysprint - path: /u/myuser/outputfile.txt - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: List data sets matching pattern in catalog, - save output to a file in UNIX System Services. - Return the contents of the file in encoding IBM-1047, - while the file is encoded in ISO8859-1. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_unix: - dd_name: sysprint - path: /u/myuser/outputfile.txt - return_content: - type: text - src_encoding: iso8859-1 - response_encoding: ibm-1047 - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: List data sets matching pattern in catalog, - return output to user, but don't store in persistent storage. - Return the contents of the file in encoding IBM-1047, - while the file is encoded in ISO8859-1. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_output: - dd_name: sysprint - return_content: - type: text - src_encoding: iso8859-1 - response_encoding: ibm-1047 - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: Take a set of data sets and write them to an archive. - zos_mvs_raw: - program_name: adrdssu - auth: true - dds: - - dd_data_set: - dd_name: archive - data_set_name: myhlq.stor.darv1 - disposition: old - - dd_data_set: - dd_name: sysin - data_set_name: myhlq.adrdssu.cmd - disposition: shr - - dd_dummy: - dd_name: sysprint - - - name: Merge two sequential data sets and write them to a new data set - zos_mvs_raw: - program_name: sort - auth: false - parm: "MSGPRT=CRITICAL,LIST" - dds: - - dd_data_set: - dd_name: sortin01 - data_set_name: myhlq.dfsort.main - disposition: shr - - dd_data_set: - dd_name: sortin02 - data_set_name: myhlq.dfsort.new - - dd_input: - dd_name: sysin - content: " MERGE FORMAT=CH,FIELDS=(1,9,A)" - - dd_data_set: - dd_name: sortout - data_set_name: myhlq.dfsort.merge - type: seq - disposition: new - - dd_unix: - dd_name: sysout - path: /tmp/sortpgmoutput.txt - mode: 644 - status_group: - - ocreat - access_group: w - - - name: List data sets matching a pattern in catalog, - save output to a concatenation of data set members and - files. - zos_mvs_raw: - pgm: idcams - auth: true - dds: - - dd_concat: - dd_name: sysprint - dds: - - dd_data_set: - data_set_name: myhlq.ds1.out(out1) - - dd_data_set: - data_set_name: myhlq.ds1.out(out2) - - dd_data_set: - data_set_name: myhlq.ds1.out(out3) - - dd_unix: - path: /tmp/overflowout.txt - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SYS1.*')" - - - name: Drop the contents of the input data set into the output data set using the REPRO command. - zos_mvs_raw: - pgm: idcams - auth: true - dds: - - dd_data_set: - dd_name: INPUT - data_set_name: myhlq.ds1.input - - dd_data_set: - dd_name: OUTPUT - data_set_name: myhlq.ds1.output - - dd_input: - dd_name: sysin - content: | - " REPRO - - INFILE(INPUT) - - OUTFILE(OUTPUT)" - - dd_output: - dd_name: sysprint - return_content: - type: text - - - name: Define a cluster using a literal block style indicator - with a 2 space indentation. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_output: - dd_name: sysprint - return_content: - type: text - - dd_input: - dd_name: sysin - content: 2 - DEFINE CLUSTER - - (NAME(ANSIBLE.TEST.VSAM) - - CYL(10 10) - - FREESPACE(20 20) - - INDEXED - - KEYS(32 0) - - NOERASE - - NONSPANNED - - NOREUSE - - SHAREOPTIONS(3 3) - - SPEED - - UNORDERED - - RECORDSIZE(4086 32600) - - VOLUMES(222222) - - UNIQUE) - - - name: Simple FTP connection using first and second columns. - zos_mvs_raw: - program_name: AMAPDUPL - auth: true - dds: - - dd_output: - dd_name: sysprint - return_content: - type: text - - dd_data_set: - dd_name: SYSUT1 - data_set_name: myhlq.ds1.output - disposition: shr - - dd_input: - dd_name: sysin - reserved_cols: 0 - content: | - USERID=anonymous - PASSWORD=anonymous - TARGET_SYS=testcase.boulder.ibm.com - TARGET_DSN=wessamp.bigfile - - - name: List data sets matching pattern in catalog, - save output to a new generation of a GDG. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: TEST.CREATION(+1) - disposition: new - return_content: - type: text - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: List data sets matching pattern in catalog, - save output to a GDS already created. - zos_mvs_raw: - program_name: idcams - auth: true - dds: - - dd_data_set: - dd_name: sysprint - data_set_name: TEST.CREATION(-2) - return_content: - type: text - - dd_input: - dd_name: sysin - content: " LISTCAT ENTRIES('SOME.DATASET.*')" - - - name: Recall a migrated data set. - zos_mvs_raw: - program_name: ikjeft01 - auth: true - dds: - - dd_output: - dd_name: systsprt - return_content: - type: text - - dd_input: - dd_name: systsin - content: - - "HRECALL 'MY.DATASET' WAIT" - - - - -Notes ------ - -.. note:: - When executing programs using `zos_mvs_raw <./zos_mvs_raw.html>`_, you may encounter errors that originate in the program's implementation. Two such known issues are noted below of which one has been addressed with an APAR. - - 1. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication. - - 2. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089. - - 3. When executing a program, refer to the program's documentation as each program's requirements can vary from DDs, instream-data indentation and continuation characters. - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_data_set_module` - - - - -Return Values -------------- - - -ret_code - The return code. - - | **returned**: always - | **type**: dict - - code - The return code number returned from the program. - - | **type**: int - - -dd_names - All the DDs related to the program. - - | **returned**: on success - | **type**: list - | **elements**: dict - - dd_name - The data definition name. - - | **type**: str - - name - The data set or path name associated with the data definition. - - | **type**: str - - content - The content contained in the data definition. - - | **type**: list - | **elements**: str - - record_count - The lines of the content. - - | **type**: int - - byte_count - The number of bytes in the response content. - - | **type**: int - - -backups - List of any data set backups made during execution. - - | **returned**: always - | **type**: dict - - original_name - The original data set name for which a backup was made. - - | **type**: str - - backup_name - The name of the data set containing the backup of content from data set in original_name. - - | **type**: str - - -stdout - The stdout from a USS command or MVS command, if applicable. - - | **returned**: always - | **type**: str - -stderr - The stderr of a USS command or MVS command, if applicable. - - | **returned**: failure - | **type**: str - diff --git a/docs/source/modules/zos_operator.rst b/docs/source/modules/zos_operator.rst deleted file mode 100644 index 8710256f74..0000000000 --- a/docs/source/modules/zos_operator.rst +++ /dev/null @@ -1,212 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_operator.py - -.. _zos_operator_module: - - -zos_operator -- Execute operator command -======================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Execute an operator command and receive the output. - - - - - -Parameters ----------- - - -cmd - The command to execute. - - If the command contains single-quotations, another set of single quotes must be added. - - For example, change the command "...,P='DSN3EPX,-DBC1,S'" to "...,P=''DSN3EPX,-DBC1,S'' ". - - If the command contains any special characters ($, &, etc), they must be escaped using double backslashes like \\\\\\$. - - For example, to display job by job name the command would be ``cmd:"\\$dj''HELLO''"`` - - By default, the command will be converted to uppercase before execution; to control this behavior, see the *case_sensitive* option below. - - | **required**: True - | **type**: str - - -verbose - Return diagnostic messages that describe the command's execution, options, buffer and response size. - - | **required**: False - | **type**: bool - | **default**: False - - -wait_time_s - Set maximum time in seconds to wait for the commands to execute. - - When set to 0, the system default is used. - - This option is helpful on a busy system requiring more time to execute commands. - - Setting *wait* can instruct whether execution should wait the full *wait_time_s*. - - | **required**: False - | **type**: int - | **default**: 1 - - -case_sensitive - If ``true``, the command will not be converted to uppercase before execution. Instead, the casing will be preserved just as it was written in a task. - - | **required**: False - | **type**: bool - | **default**: False - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. 
code-block:: yaml+jinja - - - - name: Execute an operator command to show device status and allocation - zos_operator: - cmd: 'd u' - - - name: Execute an operator command to show device status and allocation with verbose information - zos_operator: - cmd: 'd u' - verbose: true - - - name: Execute an operator command to purge all job logs (requires escaping) - zos_operator: - cmd: "\\$PJ(*)" - - - name: Execute operator command to show jobs, always waiting 5 seconds for response - zos_operator: - cmd: 'd a,all' - wait_time_s: 5 - - - name: Display the system symbols and associated substitution texts. - zos_operator: - cmd: 'D SYMBOLS' - - - - -Notes ------ - -.. note:: - Commands may need to use specific prefixes like $, they can be discovered by issuing the following command ``D OPDATA,PREFIX``. - - - - - - - -Return Values -------------- - - -rc - Return code for the submitted operator command. - - | **returned**: always - | **type**: int - -cmd - Operator command submitted. - - | **returned**: always - | **type**: str - | **sample**: d u,all - -elapsed - The number of seconds that elapsed waiting for the command to complete. - - | **returned**: always - | **type**: float - | **sample**: - - .. code-block:: json - - 51.53 - -wait_time_s - The maximum time in seconds to wait for the commands to execute. - - | **returned**: always - | **type**: int - | **sample**: 5 - -content - The resulting text from the command submitted. - - | **returned**: on success - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "EC33017A 2022244 16:00:49.00 ISF031I CONSOLE OMVS0000 ACTIVATED", - "EC33017A 2022244 16:00:49.00 -D U,ALL ", - "EC33017A 2022244 16:00:49.00 IEE457I 16.00.49 UNIT STATUS 645", - " UNIT TYPE STATUS VOLSER VOLSTATE SS", - " 0000 3390 F-NRD /RSDNT 0", - " 0001 3211 OFFLINE 0", - " 0002 3211 OFFLINE 0", - " 0003 3211 OFFLINE 0", - " 0004 3211 OFFLINE 0", - " 0005 3211 OFFLINE 0", - " 0006 3211 OFFLINE 0", - " 0007 3211 OFFLINE 0", - " 0008 3211 OFFLINE 0", - " 0009 3277 OFFLINE 0", - " 000C 2540 A 0", - " 000D 2540 A 0", - " 000E 1403 A 0", - " 000F 1403 A 0", - " 0010 3211 A 0", - " 0011 3211 A 0" - ] - -changed - Indicates if any changes were made during module operation. Given operator commands may introduce changes that are unknown to the module. True is always returned unless either a module or command failure has occurred. - - | **returned**: always - | **type**: bool - | **sample**: - - .. code-block:: json - - true - diff --git a/docs/source/modules/zos_operator_action_query.rst b/docs/source/modules/zos_operator_action_query.rst deleted file mode 100644 index 350f87266b..0000000000 --- a/docs/source/modules/zos_operator_action_query.rst +++ /dev/null @@ -1,259 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_operator_action_query.py - -.. _zos_operator_action_query_module: - - -zos_operator_action_query -- Display messages requiring action -============================================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Get a list of outstanding messages requiring operator action given one or more conditions. - - - - - -Parameters ----------- - - -system - Return outstanding messages requiring operator action awaiting a reply for a particular system. - - If the system name is not specified, all outstanding messages for that system and for the local systems attached to it are returned. - - A trailing asterisk, (*) wildcard is supported. 
- - | **required**: False - | **type**: str - - -message_id - Return outstanding messages requiring operator action awaiting a reply for a particular message identifier. - - If the message identifier is not specified, all outstanding messages for all message identifiers are returned. - - A trailing asterisk, (*) wildcard is supported. - - | **required**: False - | **type**: str - - -job_name - Return outstanding messages requiring operator action awaiting a reply for a particular job name. - - If the message job name is not specified, all outstanding messages for all job names are returned. - - A trailing asterisk, (*) wildcard is supported. - - | **required**: False - | **type**: str - - -message_filter - Return outstanding messages requiring operator action awaiting a reply that match a regular expression (regex) filter. - - If the message filter is not specified, all outstanding messages are returned regardless of their content. - - | **required**: False - | **type**: dict - - - filter - Specifies the substring or regex to match to the outstanding messages, see *use_regex*. - - All special characters in a filter string that are not a regex are escaped. - - Valid Python regular expressions are supported. See `the official documentation `_ for more information. - - Regular expressions are compiled with the flag **re.DOTALL**, which makes the **'.'** special character match any character including a newline. - - | **required**: True - | **type**: str - - - use_regex - Indicates that the value for *filter* is a regex or a string to match. - - If False, the module assumes that *filter* is not a regex and matches the *filter* substring on the outstanding messages. - - If True, the module creates a regex from the *filter* string and matches it to the outstanding messages. - - | **required**: False - | **type**: bool - | **default**: False - - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Display all outstanding messages issued on system MV2H - zos_operator_action_query: - system: mv2h - - - name: Display all outstanding messages whose job name begins with im5 - zos_operator_action_query: - job_name: im5* - - - name: Display all outstanding messages whose message ID begins with dsi* - zos_operator_action_query: - message_id: dsi* - - - name: Display all outstanding messages that have the text IMS READY in them - zos_operator_action_query: - message_filter: - filter: IMS READY - - - name: Display all outstanding messages where the job name begins with 'mq', - message ID begins with 'dsi', on system 'mv29' and which contain the - pattern 'IMS' - zos_operator_action_query: - job_name: mq* - message_id: dsi* - system: mv29 - message_filter: - filter: ^.*IMS.*$ - use_regex: true - - - - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_operator_module` - - - - -Return Values -------------- - - -changed - Indicates if any changes were made during module operation. Given operator action commands query for messages, True is always returned unless either a module or command failure has occurred. - - | **returned**: always - | **type**: bool - -count - The total number of outstanding messages. - - | **returned**: on success - | **type**: int - | **sample**: 12 - -actions - The list of the outstanding messages. - - | **returned**: success - | **type**: list - | **elements**: dict - | **sample**: - - .. code-block:: json - - [ - { - "job_id": "STC01537", - "job_name": "IM5HCONN", - "message_id": "HWSC0000I", - "message_text": "*399 HWSC0000I *IMS CONNECT READY* IM5HCONN", - "number": "001", - "system": "MV27", - "type": "R" - }, - { - "job_id": "STC01533", - "job_name": "IM5HCTRL", - "message_id": "DFS3139I", - "message_text": "*400 DFS3139I IMS INITIALIZED, AUTOMATIC RESTART PROCEEDING IM5H", - "number": "002", - "system": "MV27", - "type": "R" - } - ] - - number - The message identification number. - - | **returned**: on success - | **type**: int - | **sample**: 1 - - type - The action type, 'R' means request. - - | **returned**: on success - | **type**: str - | **sample**: R - - system - System on which the outstanding message requiring operator action is awaiting a reply. - - | **returned**: on success - | **type**: str - | **sample**: MV27 - - job_id - Job identifier for the outstanding message requiring operator action awaiting a reply. - - | **returned**: on success - | **type**: str - | **sample**: STC01537 - - message_text - Content of the outstanding message requiring operator action awaiting a reply. If *message_filter* is set, *message_text* will be filtered accordingly. - - | **returned**: success - | **type**: str - | **sample**: *399 HWSC0000I *IMS CONNECT READY* IM5HCONN - - job_name - Job name for the outstanding message requiring operator action awaiting a reply. - - | **returned**: success - | **type**: str - | **sample**: IM5HCONN - - message_id - Message identifier for the outstanding message requiring operator action awaiting a reply. - - | **returned**: success - | **type**: str - | **sample**: HWSC0000I - - diff --git a/docs/source/modules/zos_ping.rst b/docs/source/modules/zos_ping.rst deleted file mode 100644 index e98f5439a8..0000000000 --- a/docs/source/modules/zos_ping.rst +++ /dev/null @@ -1,88 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_ping.py - -.. _zos_ping_module: - - -zos_ping -- Ping z/OS and check dependencies. -============================================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- `zos_ping <./zos_ping.html>`_ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python. -- `zos_ping <./zos_ping.html>`_ returns ``pong`` when the target host is not missing any required dependencies. -- If the target host is missing optional dependencies, `zos_ping <./zos_ping.html>`_ will return one or more warning messages. -- If a required dependency is missing from the target host, an explanatory message will be returned with the module failure. - - - - - - - -Attributes ----------- -action - | **support**: full - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. 
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Ping the z/OS host and perform resource checks
-     zos_ping:
-     register: result
-
-
-
-
-Notes
------
-
-.. note::
-   This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed node's default encoding, e.g. IBM-1047. Starting with OpenSSH 9.0, SCP is replaced with SFTP by default, meaning transfers are no longer treated as text and are transferred as binary, preserving the source file's encoding and resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry ``scp_extra_args="-O"`` into the ini file named ``ansible.cfg``.
-
-   For more information, review the `ansible.builtin.ssh `_ module.
-
-
-
-
-
-
-
-Return Values
--------------
-
-
-ping
-  Should contain the value "pong" on success.
-
-  | **returned**: always
-  | **type**: str
-  | **sample**: pong
-
-warnings
-  List of warnings returned from stderr when performing resource checks.
-
-  | **returned**: failure
-  | **type**: list
-  | **elements**: str
-
diff --git a/docs/source/modules/zos_replace.rst b/docs/source/modules/zos_replace.rst
deleted file mode 100644
index 70b2adf2f7..0000000000
--- a/docs/source/modules/zos_replace.rst
+++ /dev/null
@@ -1,304 +0,0 @@
-
-:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_replace.py
-
-.. _zos_replace_module:
-
-
-zos_replace -- Replace all instances of a pattern within a file or data set.
-============================================================================
-
-
-
-.. contents::
-   :local:
-   :depth: 1
-
-
-Synopsis
---------
-- The module `zos_replace <./zos_replace.html>`_ can replace all instances of a pattern in the contents of a data set.
-
-
-
-
-
-Parameters
-----------
-
-
-after
-  A regular expression that, if specified, determines which content will be replaced or removed **after** the match.
-
-  Option *after* is the start position from where the module will seek to match the *regexp* pattern. When a pattern is matched, occurrences are substituted with the value set for *replace*.
-
-  If option *after* is not set, the module will search from the beginning of the *target*.
-
-  Option *after* is a regular expression as described in the `Python library `_.
-
-  Option *after* can be used in combination with *before*. When combined with *before*, patterns are replaced or removed from *after* until the value set for *before*.
-
-  Option *after* can be interpreted as a literal string instead of a regular expression by setting option *literal=after*.
-
-  | **required**: False
-  | **type**: str
-
-
-backup
-  Specifies whether a backup of the destination should be created before editing the source *target*.
-
-  When set to ``true``, the module creates a backup file or data set.
-
-  The backup file name will be returned if *backup* is ``true`` on either success or failure of module execution such that data can be retrieved.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-backup_name
-  Specify the USS file name or data set name for the destination backup.
-
-  If *src* is a USS file or path, *backup_name* must be a file or path name, and it must be an absolute path name.
-
-  If the source is an MVS data set, *backup_name* must be an MVS data set name, and the data set must **not** be preallocated.
-
-  If it is a Generation Data Set (GDS), use a relative positive name, e.g., *SOME.CREATION(+1)*.
-
-  If *backup_name* is not provided, a default name will be used.
-  If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``.
-
-  If *src* is a sequential data set and *backup_name* is not provided, the data set will be backed up to a sequential data set with a randomly generated name.
-
-  If *src* is a data set member and *backup_name* is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name.
-
-  If *src* is a Generation Data Set (GDS) and *backup_name* is not provided, the backup will be a sequential data set.
-
-  | **required**: False
-  | **type**: str
-
-
-before
-  A regular expression that, if specified, determines which content will be replaced or removed **before** the match.
-
-  Option *before* is the end position from where the module will seek to match the *regexp* pattern. When a pattern is matched, occurrences are substituted with the value set for *replace*.
-
-  If option *before* is not set, the module will search to the end of the *target*.
-
-  Option *before* is a regular expression as described in the `Python library `_.
-
-  Option *before* can be used in combination with *after*. When combined with *after*, patterns are replaced or removed from *after* until the value set for *before*.
-
-  Option *before* can be interpreted as a literal string instead of a regular expression by setting option *literal=before*.
-
-  | **required**: False
-  | **type**: str
-
-
-encoding
-  The character set for data in the *target*. Module `zos_replace <./zos_replace.html>`_ requires the encoding to correctly read the content of a USS file or data set. If this parameter is not provided, this module assumes that the USS file or data set is encoded in IBM-1047.
-
-  Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported.
-
-  | **required**: False
-  | **type**: str
-  | **default**: IBM-1047
-
-
-literal
-  If specified, it enables the module to interpret options *after*, *before* and *regexp* as a literal rather than a regular expression.
-
-  Option *literal* accepts any combination of *after*, *before* and *regexp*.
-
-  To interpret one option as a literal, use *literal=regexp*, *literal=after* or *literal=before*.
-
-  To interpret multiple options as literals, use a list such as ``['after', 'before']`` or ``['regexp', 'after', 'before']``.
-
-  | **required**: False
-  | **type**: raw
-  | **default**: []
-
-
-target
-  The location can be a UNIX System Services (USS) file, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE.
-
-  The USS file must be an absolute pathname.
-
-  It is possible to use a generation data set (GDS) relative name of a generation already created, e.g. *SOME.CREATION(-1)*.
-
-  | **required**: True
-  | **type**: str
-
-
-tmp_hlq
-  Override the default High Level Qualifier (HLQ) for temporary and backup data sets.
-
-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value of ``TMPHLQ`` is used.
-
-  | **required**: False
-  | **type**: str
-
-
-regexp
-  The regular expression to look for in the contents of the file.
-
-  | **required**: True
-  | **type**: str
-
-
-replace
-  The string to replace *regexp* matches with.
-
-  If not set, matches are removed entirely.
-
-  | **required**: False
-  | **type**: str
-
-
-
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Replace 'profile/' pattern in USS file via blank substitution.
-     zos_replace:
-       target: /tmp/src/somefile
-       regexp: 'profile\/'
-
-   - name: Replace regexp match with blank after line match in USS file.
-     zos_replace:
-       target: "/tmp/source"
-       regexp: '^MOUNTPOINT*'
-       after: export ZOAU_ROOT
-
-   - name: Replace a specific line with a special character on a data set after a line, treating the text specified
-           for regexp as a literal string and after as a regular expression.
-     zos_replace:
-       target: SAMPLE.SOURCE
-       regexp: //*LIB DD UNIT=SYS,SPACE=(TRK,(1,1)),VOL=SER=vvvvvv
-       replace: //*LIB DD UNIT=SYS,SPACE=(CYL,(1,1))
-       after: '^\$source base \([^\s]+\)'
-       literal: regexp
-
-   - name: Replace a specific line with a special character on a data set after a line, treating the text specified
-           for regexp and after as regular expressions.
-     zos_replace:
-       target: SAMPLE.SOURCE
-       regexp: '\ \*\*LIB\ \ DD\ UNIT=SYS,SPACE=\(TRK,\(1,1\)\),VOL=SER=vvvvvv'
-       replace: //*LIB DD UNIT=SYS,SPACE=(CYL,(1,1))
-       after: '^\$source base \([^\s]+\)'
-
-   - name: Replace a specific line before a specific sentence with backup, treating the text specified for regexp and before as literal strings.
-     zos_replace:
-       target: SAMPLE.SOURCE
-       backup: true
-       regexp: //SYSPRINT DD SYSOUT=*
-       before: SAMPLES OUTPUT SYSIN *=$DSN
-       literal:
-         - regexp
-         - before
-
-   - name: Replace a specific line before a specific sentence with backup, treating the text specified for regexp and before as regular expressions.
-     zos_replace:
-       target: SAMPLE.SOURCE
-       backup: true
-       regexp: '\ //SYSPRINT\ DD\ SYSOUT=\*'
-       before: '\ SAMPLES OUTPUT SYSIN\ \*\=\$DSN'
-
-   - name: Replace 'var' with 'vars' between matched lines after and before with backup.
-     zos_replace:
-       target: SAMPLE.DATASET
-       tmp_hlq: ANSIBLE
-       backup: true
-       backup_name: BACKUP.DATASET
-       regexp: var
-       replace: vars
-       after: ^/tmp/source*
-       before: ^ if*
-
-   - name: Replace lines on a GDS and generate a backup on the same GDG.
-     zos_replace:
-       target: SOURCE.GDG(0)
-       regexp: ^(IEE132I|IEA989I|IEA888I|IEF196I|IEA000I)\s.*
-       after: ^IEE133I PENDING *
-       before: ^IEE252I DEVICE *
-       backup: true
-       backup_name: "SOURCE.GDG(+1)"
-
-   - name: Delete 'SYSTEM' calls via backref between matched lines in a PDS member.
-     zos_replace:
-       target: PDS.SOURCE(MEM)
-       regexp: '^(.*?SYSTEM.*?)SYSTEM(.*)'
-       replace: '\1\2'
-       after: IEE133I PENDING *
-       before: IEF456I JOB12345 *
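-
-The ``found`` and ``backup_name`` return values can be used to verify a
-replacement after the fact. The following sketch is illustrative only; the
-data set name and pattern are hypothetical.
-
-.. code-block:: yaml+jinja
-
-
-   - name: Replace a pattern with backup and register the result.
-     zos_replace:
-       target: USER.SAMPLE.DATA
-       regexp: OLDVAL
-       replace: NEWVAL
-       backup: true
-     register: replace_result
-
-   - name: Fail the play when nothing matched.
-     ansible.builtin.assert:
-       that:
-         - replace_result.found | default(0) > 0
-       fail_msg: "No matches; backup kept as {{ replace_result.backup_name }}"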
-
-
-
-Notes
------
-
-.. note::
-   For supported character sets used to encode data, refer to the `documentation `_.
-
-
-
-
-
-
-
-Return Values
--------------
-
-
-backup_name
-  Name of the backup file or data set that was created.
-
-  | **returned**: if backup=true
-  | **type**: str
-  | **sample**: /path/to/file.txt.2015-02-03@04:15
-
-changed
-  Indicates if the source was modified.
-
-  | **returned**: always
-  | **type**: bool
-  | **sample**:
-
-    .. code-block:: json
-
-        true
-
-found
-  Number of matches found.
-
-  | **returned**: success
-  | **type**: int
-  | **sample**: 5
-
-msg
-  A string with a generic or error message relayed to the user.
-
-  | **returned**: failure
-  | **type**: str
-  | **sample**: Parameter verification failed
-
-replaced
-  Fragment of the file that was changed.
-
-  | **returned**: always
-  | **type**: str
-  | **sample**: IEE134I TRACE DISABLED - MONITORING STOPPED
-
-target
-  The data set name or USS path that was modified.
-
-  | **returned**: always
-  | **type**: str
-  | **sample**: ANSIBLE.USER.TEXT
-
diff --git a/docs/source/modules/zos_script.rst b/docs/source/modules/zos_script.rst
deleted file mode 100644
index e85fdb14f0..0000000000
--- a/docs/source/modules/zos_script.rst
+++ /dev/null
@@ -1,419 +0,0 @@
-
-:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_script.py
-
-.. _zos_script_module:
-
-
-zos_script -- Run scripts in z/OS
-=================================
-
-
-
-.. contents::
-   :local:
-   :depth: 1
-
-
-Synopsis
---------
-- The `zos_script <./zos_script.html>`_ module runs a local or remote script in the remote machine.
-
-
-
-
-
-Parameters
-----------
-
-
-chdir
-  Change the script's working directory to this path.
-
-  When not specified, the script will run in the user's home directory on the remote machine.
-
-  | **required**: False
-  | **type**: str
-
-
-cmd
-  Path to the local or remote script followed by optional arguments.
-
-  If the script path contains spaces, make sure to enclose it in two pairs of quotes.
-
-  Arguments may need to be escaped so the shell in the remote machine handles them correctly.
-
-  | **required**: True
-  | **type**: str
-
-
-creates
-  Path to a file in the remote machine. If it exists, the script will not be executed.
-
-  | **required**: False
-  | **type**: str
-
-
-encoding
-  Specifies which encodings the script should be converted from and to.
-
-  If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to.
-
-  | **required**: False
-  | **type**: dict
-
-
-  from
-    The encoding to be converted from.
-
-    | **required**: True
-    | **type**: str
-
-
-  to
-    The encoding to be converted to.
-
-    | **required**: True
-    | **type**: str
-
-
-
-executable
-  Path of an executable in the remote machine to invoke the script with.
-
-  When not specified, the system will assume the script is interpreted REXX and try to run it as such. Make sure to include a comment identifying the script as REXX at the start of the file in this case.
-
-  | **required**: False
-  | **type**: str
-
-
-remote_src
-  If set to ``false``, the module will search for the script on the controller.
-
-  If set to ``true``, the module will search for the script on the remote machine.
-
-  | **required**: False
-  | **type**: bool
-
-
-removes
-  Path to a file in the remote machine. If it does not exist, the script will not be executed.
-
-  | **required**: False
-  | **type**: str
-
-
-use_template
-  Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module.
-
-  Only valid when ``src`` is a local file or directory.
-
-  All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc.
-
-  If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-template_parameters
-  Options to set the way Jinja2 will process templates.
-
-  Jinja2 already sets defaults for the markers it uses; you can find more information at its `official documentation `_.
-
-  These options are ignored unless ``use_template`` is true.
-
-  | **required**: False
-  | **type**: dict
-
-
-  variable_start_string
-    Marker for the beginning of a statement to print a variable in Jinja2.
-
-    | **required**: False
-    | **type**: str
-    | **default**: {{
-
-
-  variable_end_string
-    Marker for the end of a statement to print a variable in Jinja2.
-
-    | **required**: False
-    | **type**: str
-    | **default**: }}
-
-
-  block_start_string
-    Marker for the beginning of a block in Jinja2.
-
-    | **required**: False
-    | **type**: str
-    | **default**: {%
-
-
-  block_end_string
-    Marker for the end of a block in Jinja2.
-
-    | **required**: False
-    | **type**: str
-    | **default**: %}
-
-
-  comment_start_string
-    Marker for the beginning of a comment in Jinja2.
-
-    | **required**: False
-    | **type**: str
-    | **default**: {#
-
-
-  comment_end_string
-    Marker for the end of a comment in Jinja2.
-
-    | **required**: False
-    | **type**: str
-    | **default**: #}
-
-
-  line_statement_prefix
-    Prefix used by Jinja2 to identify line-based statements.
-
-    | **required**: False
-    | **type**: str
-
-
-  line_comment_prefix
-    Prefix used by Jinja2 to identify comment lines.
-
-    | **required**: False
-    | **type**: str
-
-
-  lstrip_blocks
-    Whether Jinja2 should strip leading spaces from the start of a line to a block.
-
-    | **required**: False
-    | **type**: bool
-    | **default**: False
-
-
-  trim_blocks
-    Whether Jinja2 should remove the first newline after a block is removed.
-
-    Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets.
-
-    | **required**: False
-    | **type**: bool
-    | **default**: True
-
-
-  keep_trailing_newline
-    Whether Jinja2 should keep the first trailing newline at the end of a template after rendering.
-
-    | **required**: False
-    | **type**: bool
-    | **default**: False
-
-
-  newline_sequence
-    Sequence that starts a newline in a template.
-
-    | **required**: False
-    | **type**: str
-    | **default**: \\n
-    | **choices**: \\n, \\r, \\r\\n
-
-  auto_reload
-    Whether to reload a template file when it has changed after the task has started.
-
-    | **required**: False
-    | **type**: bool
-    | **default**: False
-
-
-  autoescape
-    Whether to enable autoescape of XML/HTML elements on a template.
-
-    | **required**: False
-    | **type**: bool
-    | **default**: True
-
-
-
-
-
-Attributes
-----------
-action
-  | **support**: full
-  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
-async
-  | **support**: full
-  | **description**: Supports being used with the ``async`` keyword.
-check_mode
-  | **support**: none
-  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
-diff_mode
-  | **support**: none
-  | **description**: Will return details on what has changed (or possibly needs changing in check_mode), when in diff mode.
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Run a local REXX script on the managed z/OS node.
-     zos_script:
-       cmd: ./scripts/HELLO
-
-   - name: Run a local REXX script with args on the managed z/OS node.
-     zos_script:
-       cmd: ./scripts/ARGS "1,2"
-
-   - name: Run a remote REXX script while changing its working directory.
-     zos_script:
-       cmd: /u/user/scripts/ARGS "1,2"
-       remote_src: true
-       chdir: /u/user/output_dir
-
-   - name: Run a local Python script in the temporary directory specified in the Ansible environment variable 'remote_tmp'.
-     zos_script:
-       cmd: ./scripts/program.py
-       executable: /usr/bin/python3
-
-   - name: Run a local script made from a template.
-     zos_script:
-       cmd: ./templates/PROGRAM
-       use_template: true
-
-   - name: Run a script only when a file is not present.
-     zos_script:
-       cmd: ./scripts/PROGRAM
-       creates: /u/user/pgm_result.txt
-
-   - name: Run a script only when a file is already present on the remote machine.
-     zos_script:
-       cmd: ./scripts/PROGRAM
-       removes: /u/user/pgm_input.txt
-
-   - name: Run a shell script on the remote system
-     zos_script:
-       cmd: ./scripts/program.sh
-       executable: /bin/sh
-       remote_src: true
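-
-Since ``template_parameters`` lets you redefine the Jinja2 markers, a template
-whose content collides with the defaults can still be rendered. The following
-sketch is illustrative; the script path and marker choices are hypothetical.
-
-.. code-block:: yaml+jinja
-
-
-   - name: Run a templated script that uses '{{' literally, with custom markers.
-     zos_script:
-       cmd: ./templates/PROGRAM
-       use_template: true
-       template_parameters:
-         variable_start_string: '((('
-         variable_end_string: ')))'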
-
-
-
-Notes
------
-
-.. note::
-   When executing local scripts, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file being copied.
-
-   The location in the z/OS system where local scripts will be copied to can be configured through Ansible's ``remote_tmp`` option. Refer to `Ansible's documentation `_ for more information.
-
-   All local scripts copied to a remote z/OS system will be removed from the managed node before the module finishes executing.
-
-   Execution permissions for the group assigned to the script will be added to remote scripts if they are missing. The original permissions for remote scripts will be restored by the module before the task ends.
-
-   The module will only add execution permissions for the file owner.
-
-   If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error ``BPXW0003I``.
-
-   For supported character sets used to encode data, refer to the `documentation `_.
-
-   This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine, which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z, thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail.
-
-   This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with `zos_tso_command <./zos_tso_command.html>`_.
-
-   The community script module does not rely on Python to execute scripts on a managed node, while this module does. Python must be present on the remote machine.
-
-
-
-See Also
---------
-
-.. seealso::
-
-   - :ref:`zos_copy_module`
-   - :ref:`zos_tso_command_module`
-
-
-
-
-Return Values
--------------
-
-
-cmd
-  Original command issued by the user.
-
-  | **returned**: changed
-  | **type**: str
-  | **sample**: ./scripts/PROGRAM
-
-remote_cmd
-  Command executed on the remote machine. Will show the executable path used, and when running local scripts, will also show the temporary file used.
-
-  | **returned**: changed
-  | **type**: str
-  | **sample**: /tmp/zos_script.jycqqfny.ARGS 1,2
-
-msg
-  Failure or skip message returned by the module.
-
-  | **returned**: failure or skipped
-  | **type**: str
-  | **sample**: File /u/user/file.txt is already missing on the system, skipping script
-
-rc
-  Return code of the script.
-
-  | **returned**: changed
-  | **type**: int
-  | **sample**: 16
-
-stdout
-  The STDOUT from the script, may be empty.
-
-  | **returned**: changed
-  | **type**: str
-  | **sample**: Allocation to SYSEXEC completed.
-
-stderr
-  The STDERR from the script, may be empty.
-
-  | **returned**: changed
-  | **type**: str
-  | **sample**: An error has occurred.
-
-stdout_lines
-  List of strings containing individual lines from STDOUT.
-
-  | **returned**: changed
-  | **type**: list
-  | **sample**:
-
-    .. code-block:: json
-
-        [
-            "Allocation to SYSEXEC completed."
-        ]
-
-stderr_lines
-  List of strings containing individual lines from STDERR.
-
-  | **returned**: changed
-  | **type**: list
-  | **sample**:
-
-    .. code-block:: json
-
-        [
-            "An error has occurred"
-        ]
-
diff --git a/docs/source/modules/zos_started_task.rst b/docs/source/modules/zos_started_task.rst
deleted file mode 100644
index ca59f32ee3..0000000000
--- a/docs/source/modules/zos_started_task.rst
+++ /dev/null
@@ -1,530 +0,0 @@
-
-:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_started_task.py
-
-.. _zos_started_task_module:
-
-
-zos_started_task -- Perform operations on started tasks.
-========================================================
-
-
-
-.. contents::
-   :local:
-   :depth: 1
-
-
-Synopsis
---------
-- Start, display, modify, cancel, force and stop a started task.
-
-
-
-
-
-Parameters
-----------
-
-
-arm
-  *arm* indicates that normal task termination routines should run, without causing address space destruction.
-
-  Only applicable when *state* is ``forced``, otherwise ignored.
-
-  | **required**: False
-  | **type**: bool
-
-
-armrestart
-  Indicates that the batch job or started task should be automatically restarted after CANCEL or FORCE completes, if it is registered as an element of the automatic restart manager. If the job or task is not registered or if you do not specify this parameter, MVS will not automatically restart the job or task.
-
-  Only applicable when *state* is ``cancelled`` or ``forced``, otherwise ignored.
-
-  | **required**: False
-  | **type**: bool
-
-
-asidx
-  When *state* is ``cancelled``, ``stopped`` or ``forced``, *asidx* is the hexadecimal address space identifier of the work unit you want to cancel, stop or force.
-
-  Only applicable when *state* is ``stopped``, ``cancelled``, or ``forced``, otherwise ignored.
-
-  | **required**: False
-  | **type**: str
-
-
-dump
-  Whether to perform a dump. The type of dump (SYSABEND, SYSUDUMP, or SYSMDUMP) depends on the JCL for the job.
-
-  Only applicable when *state* is ``cancelled``, otherwise ignored.
-
-  | **required**: False
-  | **type**: bool
-
-
-identifier_name
-  Option *identifier_name* is the name that identifies the task. This name can be up to 8 characters long. The first character must be alphabetical.
-
-  | **required**: False
-  | **type**: str
-
-
-system_logs
-  When ``system_logs=true``, the module will return system logs that describe the task's execution. This option can return a large response depending on system load; it could also surface other programs' activity.
-
-  It is not recommended to have this option on all the time, but rather to use it as a debugging option.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-job_account
-  Specifies accounting data in the JCL JOB statement for the started task. If the source JCL already had accounting data, the value that is specified on this parameter overrides it.
-
-  Only applicable when *state* is ``started``, otherwise ignored.
-
-  | **required**: False
-  | **type**: str
-
-
-job_name
-  When *state* is ``started``, this is the name which will be assigned to a started task while starting it. If *job_name* is not specified, then *member_name* is used as the job's name.
-
-  When *state* is ``displayed``, ``modified``, ``cancelled``, ``stopped``, or ``forced``, *job_name* is the started task name used to query the system.
-
-  | **required**: False
-  | **type**: str
-
-
-keyword_parameters
-  Any appropriate keyword parameter that you specify to override the corresponding parameter in the cataloged procedure. The maximum length of each keyword=option pair is 66 characters. No individual value within this field can be longer than 44 characters.
-
-  Only applicable when *state* is ``started``, otherwise ignored.
-
-  | **required**: False
-  | **type**: dict
-
-
-member_name
-  Name of a member of a partitioned data set that contains the source JCL for the task to be started. The member can be either a job or a cataloged procedure.
-
-  *member_name* is mandatory and only applicable when *state* is ``started``, otherwise ignored.
-
-  | **required**: False
-  | **type**: str
-
-
-parameters
-  Program parameters passed to the started program.
-
-  Only applicable when *state* is ``started`` or ``modified``, otherwise ignored.
-
-  For example, REFRESH or REPLACE parameters can be passed while modifying a started task.
-
-  | **required**: False
-  | **type**: list
-  | **elements**: str
-
-
-reusable_asid
-  When *reusable_asid* is ``True`` and REUSASID(YES) is specified in the DIAGxx parmlib member, a reusable ASID is assigned to the address space created by the START command. If *reusable_asid* is not specified or REUSASID(NO) is specified in DIAGxx, an ordinary ASID is assigned.
-
-  Only applicable when *state* is ``started``, otherwise ignored.
-
-  | **required**: False
-  | **type**: bool
-
-
-state
-  *state* is the desired state of the started task after the module is executed.
-
-  If *state* is ``started`` and the respective member is not present on the managed node, then an error will be returned with ``rc=1``, ``changed=false`` and *stderr* containing error details.
-
-  If *state* is ``cancelled``, ``modified``, ``displayed``, ``stopped`` or ``forced`` and the started task is not running on the managed node, then an error will be returned with ``rc=1``, ``changed=false`` and *stderr* containing error details.
-
-  If *state* is ``displayed`` and the started task is running, then the module will return the started task details along with ``changed=true``.
-
-  | **required**: True
-  | **type**: str
-  | **choices**: started, displayed, modified, cancelled, stopped, forced
-
-
-subsystem
-  The name of the subsystem that selects the task for processing. The name must be 1-4 characters long, which are defined in the IEFSSNxx parmlib member, and the subsystem must be active.
-
-  Only applicable when *state* is ``started``, otherwise ignored.
-
-  | **required**: False
-  | **type**: str
-
-
-task_id
-  A unique system-generated identifier that represents a specific started task running in z/OS. This id starts with STC.
-
-  Only applicable when *state* is ``displayed``, ``modified``, ``cancelled``, ``stopped``, or ``forced``, otherwise ignored.
-
-  | **required**: False
-  | **type**: str
-
-
-user_id
-  The user ID of the time-sharing user you want to cancel or force.
-
-  Only applicable when *state* is ``cancelled`` or ``forced``, otherwise ignored.
-
-  | **required**: False
-  | **type**: str
-
-
-verbose
-  When ``verbose=true``, the module will return the started task execution logs.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-wait_full_time
-  For a started task that takes time to initialize, *wait_time* with ``wait_full_time=true`` ensures the started task completes initialization and JES updates the system control blocks.
-
-  If ``wait_full_time=false``, the module polls every 5 seconds to check the status of the started task and returns immediately once the task is successfully validated.
-
-  When ``wait_full_time=true``, the module waits for the duration specified in *wait_time*, even after the started task operation has been successfully validated.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-wait_time
-  Total time that the module will wait for a submitted task, measured in seconds. The time begins when the module is executed on the managed node.
-
-  The default value is 10 seconds if this value is not specified, or if the specified value is less than 10.
-
-  | **required**: False
-  | **type**: int
-  | **default**: 10
-
-
-
-
-Attributes
-----------
-action
-  | **support**: none
-  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
-async
-  | **support**: full
-  | **description**: Supports being used with the ``async`` keyword.
-check_mode
-  | **support**: full
-  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Start a started task using a member in a partitioned data set.
-     zos_started_task:
-       state: "started"
-       member: "PROCAPP"
-
-   - name: Start a started task using a member name and giving it an identifier.
-     zos_started_task:
-       state: "started"
-       member: "PROCAPP"
-       identifier: "SAMPLE"
-
-   - name: Start a started task using both a member and a job name.
-     zos_started_task:
-       state: "started"
-       member: "PROCAPP"
-       job_name: "SAMPLE"
-
-   - name: Start a started task and enable verbose output.
-     zos_started_task:
-       state: "started"
-       member: "PROCAPP"
-       job_name: "SAMPLE"
-       verbose: True
-
-   - name: Start a started task and wait for 30 seconds before fetching task details.
-     zos_started_task:
-       state: "started"
-       member: "PROCAPP"
-       verbose: True
-       wait_time: 30
-       wait_full_time: True
-
-   - name: Start a started task specifying the subsystem and enabling a reusable ASID.
-     zos_started_task:
-       state: "started"
-       member: "PROCAPP"
-       subsystem: "MSTR"
-       reusable_asid: "YES"
-
-   - name: Display a started task using a started task name.
-     zos_started_task:
-       state: "displayed"
-       task_name: "PROCAPP"
-
-   - name: Display a started task using a started task id.
-     zos_started_task:
-       state: "displayed"
-       task_id: "STC00012"
-
-   - name: Display all started tasks that begin with an s using a wildcard.
-     zos_started_task:
-       state: "displayed"
-       task_name: "s*"
-
-   - name: Display all started tasks.
-     zos_started_task:
-       state: "displayed"
-       task_name: "all"
-
-   - name: Cancel a started task using task name.
-     zos_started_task:
-       state: "cancelled"
-       task_name: "SAMPLE"
-
-   - name: Cancel a started task using a started task id.
-     zos_started_task:
-       state: "cancelled"
-       task_id: "STC00093"
-
-   - name: Cancel a started task using its task name and ASID.
-     zos_started_task:
-       state: "cancelled"
-       task_name: "SAMPLE"
-       asidx: 0014
-
-   - name: Modify a started task's parameters.
-     zos_started_task:
-       state: "modified"
-       task_name: "SAMPLE"
-       parameters: ["XX=12"]
-
-   - name: Modify a started task's parameters using a started task id.
-     zos_started_task:
-       state: "modified"
-       task_id: "STC00034"
-       parameters: ["XX=12"]
-
-   - name: Stop a started task using its task name.
-     zos_started_task:
-       state: "stopped"
-       task_name: "SAMPLE"
-
-   - name: Stop a started task using a started task id.
-     zos_started_task:
-       state: "stopped"
-       task_id: "STC00087"
-
-   - name: Stop a started task using its task name, identifier and ASID.
-     zos_started_task:
-       state: "stopped"
-       task_name: "SAMPLE"
-       identifier: "SAMPLE"
-       asidx: 00A5
-
-   - name: Force a started task using its task name.
-     zos_started_task:
-       state: "forced"
-       task_name: "SAMPLE"
-
-   - name: Force a started task using its task id.
-     zos_started_task:
-       state: "forced"
-       task_id: "STC00065"
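-
-The ``tasks`` list documented under Return Values can be inspected after a
-display operation. The following sketch is illustrative; the task name is
-hypothetical.
-
-.. code-block:: yaml+jinja
-
-
-   - name: Display a started task and register the result.
-     zos_started_task:
-       state: "displayed"
-       task_name: "SAMPLE"
-     register: display_result
-
-   - name: Report the ASID of the first matching task, when one was found.
-     ansible.builtin.debug:
-       msg: "ASID is {{ display_result.tasks[0].asidx }}"
-     when: display_result.tasks | default([]) | length > 0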
-
-
-
-
-
-
-
-
-
-Return Values
--------------
-
-
-changed
-  True if the state was changed, otherwise False.
-
-  | **returned**: always
-  | **type**: bool
-
-cmd
-  Command executed via opercmd to achieve the desired state.
-
-  | **returned**: changed
-  | **type**: str
-  | **sample**: S SAMPLE
-
-msg
-  Failure or skip message returned by the module.
-
-  | **returned**: failure or skipped
-  | **type**: str
-  | **sample**: Command parameters are invalid.
-
-rc
-  The return code is 0 when the command executed successfully.
-
-  The return code is 1 when opercmd throws any error.
-
-  The return code is 4 when the task_id format is invalid.
-
-  The return code is 5 when any parameter validation failed.
-
-  The return code is 8 when the started task is not found using task_id.
-
-  | **returned**: changed
-  | **type**: int
-
-state
-  The final state of the started task, after execution.
-
-  | **returned**: success
-  | **type**: str
-  | **sample**: S SAMPLE
-
-stderr
-  The STDERR from the command, may be empty.
-
-  | **returned**: failure
-  | **type**: str
-  | **sample**: An error has occurred.
-
-stderr_lines
-  List of strings containing individual lines from STDERR.
-
-  | **returned**: failure
-  | **type**: list
-  | **sample**:
-
-    .. code-block:: json
-
-        [
-            "An error has occurred"
-        ]
-
-stdout
-  The STDOUT from the command, may be empty.
-
-  | **returned**: success
-  | **type**: str
-  | **sample**: ISF031I CONSOLE OMVS0000 ACTIVATED.
-
-stdout_lines
-  List of strings containing individual lines from STDOUT.
-
-  | **returned**: success
-  | **type**: list
-  | **sample**:
-
-    .. code-block:: json
-
-        [
-            "Allocation to SYSEXEC completed."
-        ]
-
-tasks
-  The output information for a list of started tasks matching specified criteria.
-
-  If no started task is found then this will return empty.
-
-  | **returned**: success
-  | **type**: list
-  | **elements**: dict
-
-  asidx
-    Address space identifier (ASID), in hexadecimal.
-
-    | **type**: str
-    | **sample**: 44
-
-  cpu_time
-    The processor time used by the address space, including the initiator. This time does not include SRB time.
-
-    *cpu_time* format is hhhhh.mm.ss.SSS (hours.minutes.seconds.milliseconds).
-
-    ``********`` when time exceeds 100000 hours.
-
-    ``NOTAVAIL`` when the TOD clock is not working.
-
-    | **type**: str
-    | **sample**: 00000.00.00.003
-
-  elapsed_time
-    For address spaces other than system address spaces, this value represents the elapsed time since the task was selected for execution.
-
-    For system address spaces created before master scheduler initialization, this value represents the elapsed time since the master scheduler was initialized.
-
-    For system address spaces created after master scheduler initialization, this value represents the elapsed time since the system address space was created.
-
-    *elapsed_time* format is hhhhh.mm.ss.SSS (hours.minutes.seconds.milliseconds).
-
-    ``********`` when time exceeds 100000 hours.
-
-    ``NOTAVAIL`` when the TOD clock is not working.
-
-    | **type**: str
-    | **sample**: 00003.20.23.013
-
-  started_time
-    The time when the started task started.
-
-    ``********`` when time exceeds 100000 hours.
-
-    ``NOTAVAIL`` when the TOD clock is not working.
-
-    | **type**: str
-    | **sample**: 2025-09-11 18:21:50.293644+00:00
-
-  task_id
-    The started task id.
-
-    | **type**: str
-    | **sample**: STC00018
-
-  task_identifier
-    The name of a system address space.
-
-    The name of a step, for a job or attached APPC transaction program attached by an initiator.
-
-    The identifier of a task created by the START command.
-
-    The name of a step that called a cataloged procedure.
-
-    ``STARTING`` if initiation of a started job, system task, or attached APPC transaction program is incomplete.
-
-    ``*MASTER*`` for the master address space.
-
-    The name of an initiator address space.
-
-    | **type**: str
-    | **sample**: SPROC
-
-  task_name
-    The name of the started task.
-
-    | **type**: str
-    | **sample**: SAMPLE
-
-
-verbose_output
-  If ``verbose=true``, the system logs related to the started task executed state will be shown.
-
-  | **returned**: success
-  | **type**: str
-  | **sample**: 04.33.04 STC00077 ---- SUNDAY, 12 OCT 2025 ----....
-
diff --git a/docs/source/modules/zos_stat.rst b/docs/source/modules/zos_stat.rst
deleted file mode 100644
index 4c1aa5b19f..0000000000
--- a/docs/source/modules/zos_stat.rst
+++ /dev/null
@@ -1,1326 +0,0 @@
-
-:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_stat.py
-
-.. _zos_stat_module:
-
-
-zos_stat -- Retrieve facts from MVS data sets, USS files, aggregates and generation data groups
-===============================================================================================
-
-
-
-.. contents::
-   :local:
-   :depth: 1
-
-
-Synopsis
---------
-- The `zos_stat <./zos_stat.html>`_ module retrieves facts from resources stored in a z/OS system.
-- Resources that can be queried are UNIX System Services files, data sets, generation data groups and aggregates.
-
-
-
-
-
-Parameters
-----------
-
-
-name
-  Name of a data set, generation data group (GDG), aggregate, or a UNIX System Services file path, to query.
-
-  Data sets can be sequential, partitioned (PDS), partitioned extended (PDSE), VSAMs or generation data sets (GDS).
-
-  This option doesn't accept the use of wildcards (? and *).
-
-  | **required**: True
-  | **type**: str
-
-
-volumes
-  Name(s) of the volume(s) on which the data set will be searched.
-
-  If omitted, the module will look up the master catalog to find all volumes where a data set is allocated.
-
-  When used, if the data set is not found in at least one volume from the list, the module will fail with a "data set not found" message.
-
-  | **required**: False
-  | **type**: list
-  | **elements**: str
-
-
-type
-  Type of resource to query.
-
-  | **required**: False
-  | **type**: str
-  | **default**: data_set
-  | **choices**: data_set, file, aggregate, gdg
-
-
-sms_managed
-  Whether the data set is managed by the Storage Management Subsystem.
-
-  It will cause the module to retrieve additional information and may take longer to query all attributes of a data set.
-
-  If the data set is a PDSE and the Ansible user has RACF READ authority on it, retrieving SMS information will update the last referenced date of the data set.
-
-  If the system finds the data set is not actually managed by SMS, the rest of the attributes will still be queried and this will be noted in the output from the task.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-recall
-  Whether to recall a migrated data set to fully query its attributes.
-
-  If set to ``false``, the module will return a limited amount of information for a migrated data set.
-
-  Recalling a data set will make the module take longer to execute.
-
-  Ignored when the data set is not found to be migrated.
-
-  The data set will not be migrated again afterwards.
-
-  The data set will not get recalled when running the module in check mode.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-tmp_hlq
-  Override the default high level qualifier (HLQ) for temporary data sets.
-
-  The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used.
-
-  | **required**: False
-  | **type**: str
-
-
-follow
-  Whether to follow symlinks when querying files.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: False
-
-
-get_mime
-  Whether to get information about the nature of a file, such as the charset and type of media it represents.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: True
-
-
-get_checksum
-  Whether to compute a file's checksum and return it. Otherwise ignored.
-
-  | **required**: False
-  | **type**: bool
-  | **default**: True
-
-
-checksum_algorithm
-  Algorithm used to compute a file's checksum.
-
-  Will throw an error if the managed node is unable to use the specified algorithm.
-
-  | **required**: False
-  | **type**: str
-  | **default**: sha1
-  | **choices**: md5, sha1, sha224, sha256, sha384, sha512
-
-
-
-
-Attributes
-----------
-action
-  | **support**: none
-  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
-async
-  | **support**: full
-  | **description**: Supports being used with the ``async`` keyword.
-check_mode
-  | **support**: full
-  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Get the attributes of a sequential data set.
-     zos_stat:
-       name: USER.SEQ.DATA
-       type: data_set
-
-   - name: Get the attributes of a sequential data set on volume '000000'.
-     zos_stat:
-       name: USER.SEQ.DATA
-       type: data_set
-       volumes:
-         - "000000"
-
-   - name: Get the attributes of a sequential data set allocated on multiple volumes.
-     zos_stat:
-       name: USER.SEQ.DATA
-       type: data_set
-       volumes:
-         - "000000"
-         - "222222"
-
-   - name: Get the attributes of a PDSE managed by SMS.
-     zos_stat:
-       name: USER.PDSE.DATA
-       type: data_set
-       sms_managed: true
-
-   - name: Get the attributes of a sequential data set with a non-default temporary HLQ.
-     zos_stat:
-       name: USER.SEQ.DATA
-       type: data_set
-       tmp_hlq: "RESTRICT"
-
-   - name: Get the attributes of a generation data group.
-     zos_stat:
-       name: "USER.GDG.DATA"
-       type: gdg
-
-   - name: Get the attributes of a generation data set.
-     zos_stat:
-       name: "USER.GDG.DATA(-1)"
-       type: data_set
-
-   - name: Get the attributes of an aggregate.
-     zos_stat:
-       name: "HLQ.USER.ZFS.DATA"
-       type: aggregate
-
-   - name: Get the attributes of a file inside Unix System Services.
-     zos_stat:
-       name: "/u/user/file.txt"
-       type: file
-       get_checksum: true
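-
-The returned ``stat`` dictionary can gate later tasks, much like the output of
-``ansible.builtin.stat``. The following sketch is illustrative; the data set
-name is hypothetical.
-
-.. code-block:: yaml+jinja
-
-
-   - name: Query a data set and register the result.
-     zos_stat:
-       name: USER.SEQ.DATA
-       type: data_set
-     register: stat_result
-
-   - name: Report the record format only when the data set exists.
-     ansible.builtin.debug:
-       msg: "Record format is {{ stat_result.stat.attributes.record_format }}"
-     when: stat_result.stat.exists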
-
-
-
-Notes
------
-
-.. note::
-   When querying data sets, the module will create two temporary data sets. One requires around 4 kilobytes of available space on the managed node. The second one, around 1 kilobyte of available space. Both data sets will be removed before the module finishes execution.
-
-   Sometimes, the system could be unable to properly determine the organization or record format of the data set or the space units used to represent its allocation. When this happens, the values for these fields will be null.
-
-   When querying a partitioned data set (PDS), if the Ansible user has RACF READ authority on it, the last referenced date will be updated by the query operation.
-
-
-
-See Also
---------
-
-.. seealso::
-
-   - :ref:`ansible.builtin.stat_module`
-   - :ref:`zos_find_module`
-   - :ref:`zos_gather_facts_module`
-
-
-
-
-Return Values
--------------
-
-
-stat
-  Dictionary containing information about the resource.
-
-  Attributes that don't apply to the current resource will still be present on the dictionary with null values, so as to not break automation that relies on certain fields to be available.
-
-  | **returned**: success
-  | **type**: dict
-
-  name
-    Name of the resource queried.
-
-    For Generation Data Sets (GDSs), this will be the absolute name.
-
-    | **returned**: success
-    | **type**: str
-    | **sample**: USER.SEQ.DATA.SET
-
-  resource_type
-    One of 'data_set', 'gdg', 'file' or 'aggregate'.
-
-    | **returned**: success
-    | **type**: str
-    | **sample**: data_set
-
-  exists
-    Whether name was found on the managed node.
-
-    | **returned**: success
-    | **type**: bool
-    | **sample**:
-
-      .. code-block:: json
-
-          true
-
-  isfile
-    Whether name is a Unix System Services file.
-
-    | **returned**: success
-    | **type**: bool
-    | **sample**:
-
-      .. code-block:: json
-
-          true
-
-  isdataset
-    Whether name is a data set.
-
-    | **returned**: success
-    | **type**: bool
-    | **sample**:
-
-      .. code-block:: json
-
-          true
-
-  isaggregate
-    Whether name is an aggregate.
-
-    | **returned**: success
-    | **type**: bool
-    | **sample**:
-
-      .. code-block:: json
-
-          true
-
-  isgdg
-    Whether name is a Generation Data Group.
-
-    | **returned**: success
-    | **type**: bool
-    | **sample**:
-
-      .. code-block:: json
-
-          true
-
-  attributes
-    Dictionary containing all the stat data.
-
-    | **returned**: success
-    | **type**: dict
-
-    dsorg
-      Data set organization.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: ps
-
-    type
-      Type of the data set.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: library
-
-    record_format
-      Record format of a data set.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: vb
-
-    record_length
-      Record length of a data set.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 80
-
-    block_size
-      Block size of a data set.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 27920
-
-    has_extended_attrs
-      Whether a data set has extended attributes set.
-
-      | **returned**: success
-      | **type**: bool
-      | **sample**:
-
-        .. code-block:: json
-
-            true
-
-    extended_attrs_bits
-      Current values of the EATTR bits for a data set.
- - For files, it shows the current values of the extended attributes bits as a group of 4 characters. - - | **returned**: success - | **type**: str - | **sample**: opt - - creation_date - Date a data set was created. - - | **returned**: success - | **type**: str - | **sample**: 2025-01-27 - - creation_time - Time at which a data set was created. - - Only available when a data set has extended attributes. - - | **returned**: success - | **type**: str - | **sample**: 11:25:52 - - expiration_date - Expiration date of a data set. - - | **returned**: success - | **type**: str - | **sample**: 2030-12-31 - - last_reference - Date where the data set was last referenced. - - | **returned**: success - | **type**: str - | **sample**: 2025-01-28 - - updated_since_backup - Whether the data set has been updated since its last backup. - - | **returned**: success - | **type**: bool - - jcl_attrs - Dictionary containing the names of the JCL job and step that created a data set. - - Only available for data sets with extended attributes. - - | **returned**: success - | **type**: dict - - creation_job - JCL job that created the data set. - - | **returned**: success - | **type**: str - | **sample**: DSALLOC - - creation_step - JCL job step that created the data set. - - | **returned**: success - | **type**: str - | **sample**: ALLOC - - - volser - Name of the volume containing the data set. - - | **returned**: success - | **type**: str - | **sample**: 000000 - - num_volumes - Number of volumes where the data set resides. - - | **returned**: success - | **type**: int - | **sample**: 1 - - volumes - Names of the volumes where the data set resides. - - | **returned**: success - | **type**: list - | **elements**: str - | **sample**: - - .. code-block:: json - - [ - "000000", - "SCR03" - ] - - missing_volumes - When using the ``volumes`` option, this field will contain every volume specified in a task where the data set was missing. Will be an empty list in any other case. - - | **returned**: success - | **type**: list - | **elements**: str - | **sample**: - - .. code-block:: json - - [ - "222222", - "AUXVOL" - ] - - device_type - Generic device type where the data set resides. - - | **returned**: success - | **type**: str - | **sample**: 3390 - - space_units - Units used to describe sizes for the data set. - - | **returned**: success - | **type**: str - | **sample**: track - - primary_space - Primary allocation. - - Uses the space units defined in space_units. - - | **returned**: success - | **type**: int - | **sample**: 93 - - secondary_space - Secondary allocation. - - Uses the space units defined in space_units. - - | **returned**: success - | **type**: int - | **sample**: 56 - - allocation_available - Total allocation of the data set. - - Uses the space units defined in space_units. - - | **returned**: success - | **type**: int - | **sample**: 93 - - allocation_used - Total allocation used by the data set. - - Uses the space units defined in space_units. - - | **returned**: success - | **type**: int - - extents_allocated - Number of extents allocated for the data set. - - | **returned**: success - | **type**: int - | **sample**: 1 - - extents_used - Number of extents used by the data set. - - For PDSEs, this value will be null. See instead pages_used and perc_pages_used. - - | **returned**: success - | **type**: int - | **sample**: 1 - - blocks_per_track - Blocks per track for the unit contained in space_units. 
- - | **returned**: success - | **type**: int - | **sample**: 2 - - tracks_per_cylinder - Tracks per cylinder for the unit contained in space_units. - - | **returned**: success - | **type**: int - | **sample**: 15 - - sms_data_class - The SMS data class name. - - Only returned when the data set is managed by SMS and sms_managed is set to true. - - | **returned**: success - | **type**: str - | **sample**: standard - - sms_mgmt_class - The SMS management class name. - - Only returned when the data set is managed by SMS and sms_managed is set to true. - - | **returned**: success - | **type**: str - | **sample**: vsam - - sms_storage_class - The SMS storage class name. - - Only returned when the data set is managed by SMS and sms_managed is set to true. - - | **returned**: success - | **type**: str - | **sample**: fast - - encrypted - Whether the data set is encrypted. - - | **returned**: success - | **type**: bool - - key_status - Whether the data set has a password set to read/write. - - Value can be either one of 'none', 'read' or 'write'. - - For VSAMs, the value can also be 'supp', when the module is unable to query its security attributes. - - | **returned**: success - | **type**: str - | **sample**: none - - racf - Whether there is RACF protection set on the data set. - - Value can be either one of 'none', 'generic' or 'discrete' for non-VSAM data sets. - - For VSAMs, the value can be either 'yes' or 'no'. - - | **returned**: success - | **type**: str - | **sample**: none - - key_label - The encryption key label for an encrypted data set. - - | **returned**: success - | **type**: str - | **sample**: keydsn - - dir_blocks_allocated - Number of directory blocks allocated for a PDS. - - For PDSEs, this value will be null. See instead pages_used and perc_pages_used. - - | **returned**: success - | **type**: int - | **sample**: 5 - - dir_blocks_used - Number of directory blocks used by a PDS. - - For PDSEs, this value will be null. See instead pages_used and perc_pages_used. - - | **returned**: success - | **type**: int - | **sample**: 2 - - members - Number of members inside a partitioned data set. - - | **returned**: success - | **type**: int - | **sample**: 3 - - pages_allocated - Number of pages allocated to a PDSE. - - | **returned**: success - | **type**: int - | **sample**: 1116 - - pages_used - Number of pages used by a PDSE. The pages are 4K in size. - - | **returned**: success - | **type**: int - | **sample**: 5 - - perc_pages_used - Percentage of pages used by a PDSE. - - Gets rounded down to the nearest integer value. - - | **returned**: success - | **type**: int - | **sample**: 10 - - pdse_version - PDSE data set version. - - | **returned**: success - | **type**: int - | **sample**: 1 - - max_pdse_generation - Maximum number of generations of a member that can be maintained in a PDSE. - - | **returned**: success - | **type**: int - - seq_type - Type of sequential data set (when it applies). - - Value can be either one of 'basic', 'large' or 'extended'. - - | **returned**: success - | **type**: str - | **sample**: basic - - data - Dictionary containing attributes for the DATA component of a VSAM. - - For the rest of the attributes of this data set, query it directly with this module. - - | **returned**: success - | **type**: dict - - key_length - Key length for data records, in bytes. - - | **returned**: success - | **type**: int - | **sample**: 4 - - key_offset - Key offset for data records. 
- - | **returned**: success - | **type**: int - | **sample**: 3 - - max_record_length - Maximum length of data records, in bytes. - - | **returned**: success - | **type**: int - | **sample**: 80 - - avg_record_length - Average length of data records, in bytes. - - | **returned**: success - | **type**: int - | **sample**: 80 - - bufspace - Minimum buffer space in bytes to be provided by a processing program. - - | **returned**: success - | **type**: int - | **sample**: 37376 - - total_records - Total number of records. - - | **returned**: success - | **type**: int - | **sample**: 50 - - spanned - Whether the data set allows records to be spanned across control intervals. - - | **returned**: success - | **type**: bool - - volser - Name of the volume containing the DATA component. - - | **returned**: success - | **type**: str - | **sample**: 000000 - - device_type - Generic device type where the DATA component resides. - - | **returned**: success - | **type**: str - | **sample**: 3390 - - - index - Dictionary containing attributes for the INDEX component of a VSAM. - - For the rest of the attributes of this data set, query it directly with this module. - - | **returned**: success - | **type**: dict - - key_length - Key length for index records, in bytes. - - | **returned**: success - | **type**: int - | **sample**: 4 - - key_offset - Key offset for index records. - - | **returned**: success - | **type**: int - | **sample**: 3 - - max_record_length - Maximum length of index records, in bytes. - - | **returned**: success - | **type**: int - - avg_record_length - Average length of index records, in bytes. - - | **returned**: success - | **type**: int - | **sample**: 505 - - bufspace - Minimum buffer space in bytes to be provided by a processing program. - - | **returned**: success - | **type**: int - - total_records - Total number of records. - - | **returned**: success - | **type**: int - - volser - Name of the volume containing the INDEX component. - - | **returned**: success - | **type**: str - | **sample**: 000000 - - device_type - Generic device type where the INDEX component resides. - - | **returned**: success - | **type**: str - | **sample**: 3390 - - - limit - Maximum amount of active generations allowed in a GDG. - - | **returned**: success - | **type**: int - | **sample**: 10 - - scratch - Whether the GDG has the SCRATCH attribute set. - - | **returned**: success - | **type**: bool - - empty - Whether the GDG has the EMPTY attribute set. - - | **returned**: success - | **type**: bool - - order - Allocation order of new Generation Data Sets for a GDG. - - Value can be either 'lifo' or 'fifo'. - - | **returned**: success - | **type**: str - | **sample**: lifo - - purge - Whether the GDG has the PURGE attribute set. - - | **returned**: success - | **type**: bool - - extended - Whether the GDG has the EXTENDED attribute set. - - | **returned**: success - | **type**: bool - - active_gens - List of the names of the currently active generations of a GDG. - - | **returned**: success - | **type**: list - | **elements**: str - | **sample**: - - .. code-block:: json - - [ - "USER.GDG.G0001V00", - "USER.GDG.G0002V00" - ] - - auditfid - File system identification string for an aggregate. - - | **returned**: success - | **type**: str - | **sample**: C3C6C3F0 F0F3000E 0000 - - bitmap_file_size - Size in K of an aggregate's bitmap file. - - | **returned**: success - | **type**: int - | **sample**: 8 - - converttov5 - Value of the converttov5 flag of an aggregate. 
-
-      | **returned**: success
-      | **type**: bool
-
-    filesystem_table_size
-      Size in K of an aggregate's filesystem table.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 16
-
-    free
-      Kilobytes still free in an aggregate.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 559
-
-    free_1k_fragments
-      Number of free 1-KB fragments in an aggregate.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 7
-
-    free_8k_blocks
-      Number of free 8-KB blocks in an aggregate.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 69
-
-    log_file_size
-      Size in K of an aggregate's log file.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 112
-
-    sysplex_aware
-      Value of the sysplex_aware flag of an aggregate.
-
-      | **returned**: success
-      | **type**: bool
-      | **sample**:
-
-        .. code-block:: json
-
-            true
-
-    total_size
-      Total K available in an aggregate.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 648000
-
-    version
-      Version of an aggregate.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: 1.5
-
-    quiesced
-      Attributes available when an aggregate has been quiesced.
-
-      | **returned**: success
-      | **type**: dict
-
-      job
-        Name of the job that quiesced the aggregate.
-
-        | **returned**: success
-        | **type**: str
-        | **sample**: USERJOB
-
-      system
-        Name of the system that quiesced the aggregate.
-
-        | **returned**: success
-        | **type**: str
-        | **sample**: GENSYS
-
-      timestamp
-        Timestamp of the quiesce operation.
-
-        | **returned**: success
-        | **type**: str
-        | **sample**: 2025-02-01T18:02:05
-
-
-    mode
-      Octal representation of a file's permissions.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: 0755
-
-    atime
-      Time of last access for a file.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: 2025-02-23T13:03:45
-
-    mtime
-      Time of last modification of a file.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: 2025-02-23T13:03:45
-
-    ctime
-      Time of last metadata update or creation for a file.
-
-      | **returned**: success
-      | **type**: str
-      | **sample**: 2025-02-23T13:03:45
-
-    checksum
-      Checksum of the file computed by the hashing algorithm specified in ``checksum_algorithm``.
-
-      Will be null if ``get_checksum=false``.
-
-      | **returned**: success
-      | **type**: str
-
-    uid
-      ID of the file's owner.
-
-      | **returned**: success
-      | **type**: int
-
-    gid
-      ID of the file's group.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 1
-
-    size
-      Size of the file in bytes.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 9840
-
-    inode
-      Inode number of the path.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 1671
-
-    dev
-      Device the inode resides on.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 1
-
-    nlink
-      Number of links to the inode.
-
-      | **returned**: success
-      | **type**: int
-      | **sample**: 1
-
-    isdir
-      Whether the path is a directory.
-
-      | **returned**: success
-      | **type**: bool
-
-    ischr
-      Whether the path is a character device.
-
-      | **returned**: success
-      | **type**: bool
-
-    isblk
-      Whether the path is a block device.
-
-      | **returned**: success
-      | **type**: bool
-
-    isreg
-      Whether the path is a regular file.
-
-      | **returned**: success
-      | **type**: bool
-      | **sample**:
-
-        .. code-block:: json
-
-            true
-
-    isfifo
-      Whether the path is a named pipe.
-
-      | **returned**: success
-      | **type**: bool
-
-    islnk
-      Whether the file is a symbolic link.
- - | **returned**: success - | **type**: bool - - issock - Whether the file is a Unix domain socket. - - | **returned**: success - | **type**: bool - - isuid - Whether the Ansible user's ID matches the owner's ID. - - | **returned**: success - | **type**: bool - - isgid - Whether the Ansible user's group matches the owner's group. - - | **returned**: success - | **type**: bool - - wusr - Whether the file's owner has write permission. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - rusr - Whether the file's owner has read permission. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - xusr - Whether the file's owner has execute permission. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - wgrp - Whether the file's group has write permission. - - | **returned**: success - | **type**: bool - - rgrp - Whether the file's group has read permission. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - xgrp - Whether the file's group has execute permission. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - woth - Whether others have write permission over the file. - - | **returned**: success - | **type**: bool - - roth - Whether others have read permission over the file. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - xoth - Whether others have execute permission over the file. - - | **returned**: success - | **type**: bool - - writeable - Whether the Ansible user can write to the path. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - readable - Whether the Ansible user can read the path. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - executable - Whether the Ansible user can execute the path. - - | **returned**: success - | **type**: bool - | **sample**: - - .. code-block:: json - - true - - pw_name - User name of the file's owner. - - | **returned**: success - | **type**: str - | **sample**: username - - gr_name - Group name of the file's owner. - - | **returned**: success - | **type**: str - | **sample**: group - - lnk_source - Absolute path to the target of a symlink. - - | **returned**: success - | **type**: str - | **sample**: /etc/foobar/file - - lnk_target - Target of a symlink. - - Preserves relative paths. - - | **returned**: success - | **type**: str - | **sample**: ../foobar/file - - charset - Current encoding tag associated with the file. - - This tag does not necessarily correspond with the actual encoding of the file. - - | **returned**: success - | **type**: str - | **sample**: IBM-1047 - - mimetype - Output from the file utility describing the content. - - Will be null if ``get_mime=false``. - - | **returned**: success - | **type**: str - | **sample**: commands text - - audit_bits - Audit bits for the file. Contains two sets of 3 bits. - - First 3 bits describe the user-requested audit information. - - Last 3 bits describe the auditor-requested audit information. - - For each set, the bits represent read, write and execute/search audit options. - - An 's' means to audit successful access attempts. - - An 'f' means to audit failed access attempts. - - An 'a' means to audit all access attempts. - - An '-' means to not audit accesses. 
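-
-  As a worked example matching the sample below, ``fff---`` means the user requested auditing of failed read, write and execute/search attempts, while the auditor requested no auditing.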
- - | **returned**: success - | **type**: str - | **sample**: fff--- - - file_format - File format (for regular files). One of "null", "bin" or "rec". - - Text data delimiter for a file. One of "nl", "cr", "lf", "crlf", "lfcr" or "crnl". - - | **returned**: success - | **type**: str - | **sample**: bin - - - diff --git a/docs/source/modules/zos_tso_command.rst b/docs/source/modules/zos_tso_command.rst deleted file mode 100644 index 576ecefa1b..0000000000 --- a/docs/source/modules/zos_tso_command.rst +++ /dev/null @@ -1,171 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_tso_command.py - -.. _zos_tso_command_module: - - -zos_tso_command -- Execute TSO commands -======================================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Execute TSO commands on the target z/OS system with the provided options and receive a structured response. - - - - - -Parameters ----------- - - -commands - One or more TSO commands to execute on the target z/OS system. - - Accepts a single string or list of strings as input. - - If a list of strings is provided, processing will stop at the first failure, based on rc. - - | **required**: True - | **type**: raw - - -max_rc - Specifies the maximum return code allowed for a TSO command. - - If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. - - | **required**: False - | **type**: int - | **default**: 0 - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Execute TSO commands to allocate a new dataset. - zos_tso_command: - commands: - - alloc da('TEST.HILL3.TEST') like('TEST.HILL3') - - delete 'TEST.HILL3.TEST' - - - name: Execute TSO command List User (LU) for TESTUSER to obtain TSO information. - zos_tso_command: - commands: - - LU TESTUSER - - - name: Execute TSO command List Dataset (LISTDSD) and allow for maximum return code of 4. - zos_tso_command: - commands: - - LISTDSD DATASET('HLQ.DATA.SET') ALL GENERIC - max_rc: 4 - - - name: Execute TSO command to run a REXX script explicitly from a data set. - zos_tso_command: - commands: - - EXEC HLQ.DATASET.REXX exec - - - name: Chain multiple TSO commands into one invocation using semicolons. - zos_tso_command: - commands: >- - ALLOCATE DDNAME(IN1) DSNAME('HLQ.PDSE.DATA.SRC(INPUT)') SHR; - ALLOCATE DDNAME(OUT1) DSNAME('HLQ.PDSE.DATA.DEST(OUTPUT)') SHR; - OCOPY INDD(IN1) OUTDD(OUT1) BINARY; - - - name: Recall a migrated data set. - zos_tso_command: - commands: - - HRECALL 'MY.DATASET' WAIT - - - - - - - - - - -Return Values -------------- - - -output - List of each TSO command output. - - | **returned**: always - | **type**: list - | **elements**: dict - - command - The executed TSO command. - - | **returned**: always - | **type**: str - - rc - The return code from the executed TSO command. - - | **returned**: always - | **type**: int - - max_rc - Specifies the maximum return code allowed for a TSO command. - - If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. 
- - | **returned**: always - | **type**: int - - content - The response resulting from the execution of the TSO command. - - | **returned**: always - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "NO MODEL DATA SET OMVSADM", - "TERMUACC ", - "SUBGROUP(S)= VSAMDSET SYSCTLG BATCH SASS MASS IMSGRP1 ", - " IMSGRP2 IMSGRP3 DSNCAT DSN120 J42 M63 ", - " J91 J09 J97 J93 M82 D67 ", - " D52 M12 CCG D17 M32 IMSVS ", - " DSN210 DSN130 RAD CATLG4 VCAT CSP " - ] - - lines - The line number of the content. - - | **returned**: always - | **type**: int - - diff --git a/docs/source/modules/zos_unarchive.rst b/docs/source/modules/zos_unarchive.rst deleted file mode 100644 index eec87c3eca..0000000000 --- a/docs/source/modules/zos_unarchive.rst +++ /dev/null @@ -1,555 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_unarchive.py - -.. _zos_unarchive_module: - - -zos_unarchive -- Unarchive files and data sets in z/OS. -======================================================= - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- The ``zos_unarchive`` module unpacks an archive after optionally transferring it to the remote system. -- For supported archive formats, see option ``format``. -- Supported sources are USS (UNIX System Services) or z/OS data sets. -- Mixing MVS data sets with USS files for unarchiving is not supported. -- The archive is sent to the remote as binary, so no encoding is performed. - - - - - -Parameters ----------- - - -src - The remote absolute path or data set of the archive to be uncompressed. - - *src* can be a USS file or MVS data set name. - - USS file paths should be absolute paths. - - MVS data sets supported types are ``SEQ``, ``PDS``, ``PDSE``. - - GDS relative names are supported. e.g. *USER.GDG(-1*). - - | **required**: True - | **type**: str - - -format - The compression type and corresponding options to use when archiving data. - - | **required**: True - | **type**: dict - - - name - The compression format used while archiving. - - | **required**: True - | **type**: str - | **choices**: bz2, gz, tar, zip, terse, xmit, pax - - - format_options - Options specific to a compression format. - - | **required**: False - | **type**: dict - - - xmit_log_data_set - Provide the name of a data set to store xmit log output. - - If the data set provided does not exist, the program will create it. - - If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB - - When providing the *xmit_log_data_set* name, ensure there is adequate space. - - | **required**: False - | **type**: str - - - use_adrdssu - If set to true, the ``zos_unarchive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using ``xmit`` or ``terse``. - - | **required**: False - | **type**: bool - | **default**: False - - - dest_volumes - When *use_adrdssu=True*, specify the volume the data sets will be written to. - - If no volume is specified, storage management rules will be used to determine the volume where the file will be unarchived. - - If the storage administrator has specified a system default unit name and you do not set a volume name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. 
- - | **required**: False - | **type**: list - | **elements**: str - - - - -dest - The remote absolute path or data set where the content should be unarchived to. - - *dest* can be a USS file, directory or MVS data set name. - - If dest has missing parent directories, they will not be created. - - | **required**: False - | **type**: str - - -group - Name of the group that will own the file system objects. - - When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. - - This option is only applicable if ``dest`` is USS, otherwise ignored. - - | **required**: False - | **type**: str - - -mode - The permission of the uncompressed files. - - If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. - - It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. - - The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. - - *mode=preserve* means that the file will be given the same permissions as the source file. - - | **required**: False - | **type**: str - - -owner - Name of the user that should own the filesystem object, as would be passed to the chown command. - - When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. - - | **required**: False - | **type**: str - - -include - A list of directories, files or data set names to extract from the archive. - - GDS relative names are supported. e.g. *USER.GDG(-1*). - - When ``include`` is set, only those files will we be extracted leaving the remaining files in the archive. - - Mutually exclusive with exclude. - - | **required**: False - | **type**: list - | **elements**: str - - -exclude - List the directory and file or data set names that you would like to exclude from the unarchive action. - - GDS relative names are supported. e.g. *USER.GDG(-1*). - - Mutually exclusive with include. - - | **required**: False - | **type**: list - | **elements**: str - - -list - Will list the contents of the archive without unpacking. - - | **required**: False - | **type**: bool - | **default**: False - - -dest_data_set - Data set attributes to customize a ``dest`` data set that the archive will be copied into. - - | **required**: False - | **type**: dict - - - name - Desired name for destination dataset. - - | **required**: False - | **type**: str - - - type - Organization of the destination - - | **required**: False - | **type**: str - | **default**: seq - | **choices**: seq, pds, pdse - - - space_primary - If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. - - The unit of space used is set using *space_type*. - - | **required**: False - | **type**: int - - - space_secondary - If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. - - The unit of space used is set using *space_type*. 
- - | **required**: False - | **type**: int - - - space_type - If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. - - Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. - - | **required**: False - | **type**: str - | **choices**: k, m, g, cyl, trk - - - record_format - If the destination data set does not exist, this sets the format of the data set. (e.g ``fb``) - - Choices are case-sensitive. - - | **required**: False - | **type**: str - | **choices**: fb, vb, fba, vba, u - - - record_length - The length of each record in the data set, in bytes. - - For variable data sets, the length must include the 4-byte prefix area. - - Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. - - | **required**: False - | **type**: int - - - block_size - The block size to use for the data set. - - | **required**: False - | **type**: int - - - directory_blocks - The number of directory blocks to allocate to the data set. - - | **required**: False - | **type**: int - - - key_offset - The key offset to use when creating a KSDS data set. - - *key_offset* is required when *type=ksds*. - - *key_offset* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - - key_length - The key length to use when creating a KSDS data set. - - *key_length* is required when *type=ksds*. - - *key_length* should only be provided when *type=ksds* - - | **required**: False - | **type**: int - - - sms_storage_class - The storage class for an SMS-managed dataset. - - Required for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_data_class - The data class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - sms_management_class - The management class for an SMS-managed dataset. - - Optional for SMS-managed datasets that do not match an SMS-rule. - - Not valid for datasets that are not SMS-managed. - - Note that all non-linear VSAM datasets are SMS-managed. - - | **required**: False - | **type**: str - - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary data sets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - -force - If set to true and the remote file or data set dest exists, the dest will be deleted. - - | **required**: False - | **type**: bool - | **default**: False - - -remote_src - If set to true, ``zos_unarchive`` retrieves the archive from the remote system. - - If set to false, ``zos_unarchive`` searches the local machine (Ansible controller) for the archive. - - | **required**: False - | **type**: bool - | **default**: False - - -encoding - Specifies the character encoding conversion to be applied to the destination files after unarchiving. - - Supported character sets rely on the charset conversion utility ``iconv`` version the most common character sets are supported. - - After conversion the files are stored in same location as they were unarchived to under the same original name. 
No backup of the original unconverted files is there as for that unarchive can be executed again without encoding params on same source archive files. - - Destination files will be converted to the new encoding and will not be restored to their original encoding. - - If encoding fails for any file in a set of multiple files, an exception will be raised and the name of the file skipped will be provided completing the task successfully with rc code 0. - - Encoding does not check if the file is already present or not. It works on the file/files successfully unarchived. - - | **required**: False - | **type**: dict - - - from - The character set of the source *src*. - - | **required**: False - | **type**: str - - - to - The destination *dest* character set for the files to be written as. - - | **required**: False - | **type**: str - - - skip_encoding - List of names to skip encoding after unarchiving. This is only used if *encoding* is set, otherwise is ignored. - - | **required**: False - | **type**: list - | **elements**: str - - - - - -Attributes ----------- -action - | **support**: full - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: full - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - # Simple extract - - name: Copy local tar file and unpack it on the managed z/OS node. - zos_unarchive: - src: "./files/archive_folder_test.tar" - format: - name: tar - - # use include - - name: Unarchive a bzip file selecting only a file to unpack. - zos_unarchive: - src: "/tmp/test.bz2" - format: - name: bz2 - include: - - 'foo.txt' - - # Use exclude - - name: Unarchive a terse data set and excluding data sets from unpacking. - zos_unarchive: - src: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - exclude: - - USER.ARCHIVE.TEST1 - - USER.ARCHIVE.TEST2 - - # Unarchive a GDS - - name: Unarchive a terse data set and excluding data sets from unpacking. - zos_unarchive: - src: "USER.ARCHIVE(0)" - format: - name: terse - - # List option - - name: List content from XMIT - zos_unarchive: - src: "USER.ARCHIVE.RESULT.XMIT" - format: - name: xmit - format_options: - use_adrdssu: true - list: true - - # Encoding example - - name: Encode the destination data set into Latin-1 after unarchiving. - zos_unarchive: - src: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - encoding: - from: IBM-1047 - to: ISO8859-1 - - - name: Encode the destination data set into Latin-1 after unarchiving. - zos_unarchive: - src: "USER.ARCHIVE.RESULT.TRS" - format: - name: terse - encoding: - from: IBM-1047 - to: ISO8859-1 - skip_encoding: - - USER.ARCHIVE.TEST1 - - - - -Notes ------ - -.. note:: - VSAMs are not supported. - - This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. - - - -See Also --------- - -.. 
seealso:: - - - :ref:`zos_archive_module` - - - - -Return Values -------------- - - -src - File path or data set name unpacked. - - | **returned**: always - | **type**: str - -dest_path - Destination path where archive was unpacked. - - | **returned**: always - | **type**: str - -targets - List of files or data sets in the archive. - - | **returned**: success - | **type**: list - | **elements**: str - -missing - Any files or data sets not found during extraction. - - | **returned**: success - | **type**: str - -encoded - List of files or data sets that were successfully encoded. - - | **returned**: success - | **type**: list - -failed_on_encoding - List of files or data sets that were failed while encoding. - - | **returned**: success - | **type**: list - -skipped_encoding_targets - List of files or data sets that were skipped while encoding. - - | **returned**: success - | **type**: list - diff --git a/docs/source/modules/zos_volume_init.rst b/docs/source/modules/zos_volume_init.rst deleted file mode 100644 index 68027cc310..0000000000 --- a/docs/source/modules/zos_volume_init.rst +++ /dev/null @@ -1,271 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_volume_init.py - -.. _zos_volume_init_module: - - -zos_volume_init -- Initialize volumes or minidisks. -=================================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- Initialize a volume or minidisk on z/OS. -- *zos_volume_init* will create the volume label and entry into the volume table of contents (VTOC). -- Volumes are used for storing data and executable programs. -- A minidisk is a portion of a disk that is linked to your virtual machine. -- A VTOC lists the data sets that reside on a volume, their location, size, and other attributes. -- *zos_volume_init* uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class `STGADMIN.ICK.INIT`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute ICKDSF command INIT, otherwise, any user can use the command. -- ICKDSF is an Authorized Program Facility (APF) program on z/OS, *zos_volume_init* will run in authorized mode but if the program ICKDSF is not APF authorized, the task will end. -- Note that defaults set on target z/OS systems may override ICKDSF parameters. -- If is recommended that data on the volume is backed up as the *zos_volume_init* module will not perform any backups. You can use the `zos_backup_restore <./zos_backup_restore.html>`_ module to backup a volume. - - - - - -Parameters ----------- - - -address - *address* is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk. - - *address* can be the number assigned to the device (device number) when it is installed or the virtual address. - - | **required**: True - | **type**: str - - -verify_volid - Verify that the volume serial matches what is on the existing volume or minidisk. - - *verify_volid* must be 1 to 6 alphanumeric characters or ``*NONE*``. - - To verify that a volume serial number does not exist, use *verify_volid=*NONE**. - - If *verify_volid* is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete. - - If *verify_volid=*NONE** is specified and a volume serial is found on the volume or minidisk, initialization does not complete. 
- - Note, this option is **not** a boolean, leave it blank to skip the verification. - - | **required**: False - | **type**: str - - -verify_offline - Verify that the device is not online to any other systems, initialization does not complete. - - | **required**: False - | **type**: bool - | **default**: True - - -volid - The volume serial number used to initialize a volume or minidisk. - - Expects 1-6 alphanumeric, national ($,#,@) or special characters. - - A *volid* with less than 6 characters will be padded with spaces. - - A *volid* can also be referred to as volser or volume serial number. - - When *volid* is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. - - | **required**: False - | **type**: str - - -vtoc_size - The number of tracks to initialize the volume table of contents (VTOC) with. - - The VTOC will be placed in cylinder 0 head 1. - - If no tracks are specified it will default to the number of tracks in a cylinder minus 1. Tracks in a cylinder vary based on direct-access storage device (DASD) models, for 3390 a cylinder is 15 tracks. - - | **required**: False - | **type**: int - - -index - Create a volume table of contents (VTOC) index. - - The VTOC index enhances the performance of VTOC access. - - When set to *false*, no index will be created. - - | **required**: False - | **type**: bool - | **default**: True - - -sms_managed - Specifies that the volume be managed by Storage Management System (SMS). - - If *sms_managed* is *true* then *index* must also be *true*. - - | **required**: False - | **type**: bool - | **default**: True - - -verify_volume_empty - Verify that no data sets other than the volume table of contents (VTOC) index or the VSAM Volume Data Set(VVDS) exist on the target volume. - - | **required**: False - | **type**: bool - | **default**: True - - -tmp_hlq - Override the default high level qualifier (HLQ) for temporary and backup datasets. - - The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. - - | **required**: False - | **type**: str - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Initialize target volume with all default options. Target volume address is '1234', set volume name to 'DEMO01'. - Target volume is checked to ensure it is offline and contains no data sets. Volume is SMS managed, has an index - and VTOC size defined by the system. - zos_volume_init: - address: "1234" - volid: "DEMO01" - - - name: Initialize target volume with all default options and additionally check the existing volid - matches the given value 'DEMO02' before re-initializing the volume and renaming it to 'DEMO01'. - zos_volume_init: - address: "1234" - volid: "DEMO01" - verify_volid: "DEMO02" - - - name: Initialize non-SMS managed target volume with all the default options. 
- zos_volume_init: - address: "1234" - volid: "DEMO01" - sms_managed: false - - - name: Initialize non-SMS managed target volume with all the default options and - override the default high level qualifier (HLQ). - zos_volume_init: - address: 1234 - volid: DEMO01 - sms_managed: false - tmp_hlq: TESTUSR - - - name: Initialize a new SMS managed DASD volume with new volume serial 'e8d8' with 30 track VTOC, an index, as long as - the existing volume serial is 'ine8d8' and there are no pre-existing data sets on the target. The check to see - if volume is online before intialization is skipped. - zos_volume_init: - address: e8d8 - vtoc_size: 30 - index: true - sms_managed: true - volid: ine8d8 - verify_volid: ine8d8 - verify_volume_empty: true - verify_offline: false - - - name: Initialize 3 new DASD volumes (0901, 0902, 0903) for use on a z/OS system as 'DEMO01', 'DEMO02', 'DEMO03' - using Ansible loops. - zos_volume_init: - address: "090{{ item }}" - volid: "DEMO0{{ item }}" - loop: "{{ range(1, 4, 1) }}" - - - - - - -See Also --------- - -.. seealso:: - - - :ref:`zos_backup_restore_module` - - - - -Return Values -------------- - - -msg - Failure message returned by module. - - | **returned**: failure - | **type**: str - | **sample**: 'Index' cannot be False for SMS managed volumes. - -rc - Return code from ICKDSF init command. - - | **returned**: when ICKDSF program is run. - | **type**: dict - -content - Raw output from ICKDSF. - - | **returned**: when ICKDSF program is run. - | **type**: list - | **elements**: str - | **sample**: - - .. code-block:: json - - [ - "1ICKDSF - MVS/ESA DEVICE SUPPORT FACILITIES 17.0 TIME: 18:32:22 01/17/23 PAGE 1", - "0 ", - "0 INIT UNIT(0903) NOVERIFY NOVERIFYOFFLINE VOLID(KET678) -", - "0 NODS NOINDEX", - "-ICK00700I DEVICE INFORMATION FOR 0903 IS CURRENTLY AS FOLLOWS:", - "- PHYSICAL DEVICE = 3390", - "- STORAGE CONTROLLER = 2107", - "- STORAGE CONTROL DESCRIPTOR = E8", - "- DEVICE DESCRIPTOR = 0C", - "- ADDITIONAL DEVICE INFORMATION = 4A00003C", - "- TRKS/CYL = 15, # PRIMARY CYLS = 100", - "0ICK04000I DEVICE IS IN SIMPLEX STATE", - "0ICK00703I DEVICE IS OPERATED AS A MINIDISK", - " ICK00091I 0903 NED=002107.900.IBM.75.0000000BBA01", - "-ICK03091I EXISTING VOLUME SERIAL READ = KET987", - "-ICK03096I EXISTING VTOC IS LOCATED AT CCHH=X\u00270000 0001\u0027 AND IS 14 TRACKS.", - "0ICK01314I VTOC IS LOCATED AT CCHH=X\u00270000 0001\u0027 AND IS 14 TRACKS.", - "-ICK00001I FUNCTION COMPLETED, HIGHEST CONDITION CODE WAS 0", - "0 18:32:22 01/17/23", - "0 ", - "-ICK00002I ICKDSF PROCESSING COMPLETE. MAXIMUM CONDITION CODE WAS 0" - ] - diff --git a/docs/source/modules/zos_zfs_resize.rst b/docs/source/modules/zos_zfs_resize.rst deleted file mode 100644 index 9946c5b71b..0000000000 --- a/docs/source/modules/zos_zfs_resize.rst +++ /dev/null @@ -1,314 +0,0 @@ - -:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_zfs_resize.py - -.. _zos_zfs_resize_module: - - -zos_zfs_resize -- Resize a zfs data set. -======================================== - - - -.. contents:: - :local: - :depth: 1 - - -Synopsis --------- -- The module `zos_zfs_resize. `_ can resize a zFS aggregate data set. -- The *target* data set must be a unique and a Fully Qualified Name (FQN) of a z/OS zFS aggregate data set. -- The data set must be attached as read-write. -- *size* must be provided. - - - - - -Parameters ----------- - - -target - The Fully Qualified Name of the zFS data set that will be resized. 
- - | **required**: True - | **type**: str - - -size - The desired size of the data set after the resizing is performed. - - | **required**: True - | **type**: int - - -space_type - The unit of measurement to use when defining the size. - - Valid units are ``k`` (kilobytes), ``m`` (megabytes), ``g`` (gigabytes), ``cyl`` (cylinders), and ``trk`` (tracks). - - | **required**: False - | **type**: str - | **default**: k - | **choices**: k, m, g, cyl, trk - - -no_auto_increase - Option controls whether the data set size will be automatically increased when performing a shrink operation. - - When set to ``true``, during the shrinking of the zFS aggregate, if more space be needed the total size will not be increased and the module will fail. - - | **required**: False - | **type**: bool - | **default**: False - - -verbose - Return diagnostic messages that describe the module's execution. - - Verbose includes standard out (stdout) of the module's execution which can be excessive, to avoid writing this to stdout, optionally you can set the ``trace_destination`` instead. - - | **required**: False - | **type**: bool - | **default**: False - - -trace_destination - Specify a unique USS file name or data set name for ``trace_destination``. - - If the destination ``trace_destination`` is a USS file or path, the ``trace_destination`` must be an absolute path name. - - Support MVS data set type PDS, PDS/E(MEMBER) - - If the destination is an MVS data set name, the ``trace_destination`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods - - Recommended characteristics for MVS data set are record length of 200, record format as vb and space primary 42000 kilobytes. - - | **required**: False - | **type**: str - - - - -Attributes ----------- -action - | **support**: none - | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. -async - | **support**: full - | **description**: Supports being used with the ``async`` keyword. -check_mode - | **support**: none - | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. - - - -Examples --------- - -.. code-block:: yaml+jinja - - - - name: Resize an aggregate data set to 2500 kilobytes. - zos_zfs_resize: - target: TEST.ZFS.DATA - size: 2500 - - - name: Resize an aggregate data set to 20 tracks. - zos_zfs_resize: - target: TEST.ZFS.DATA - space_type: trk - size: 20 - - - name: Resize an aggregate data set to 4 megabytes. - zos_zfs_resize: - target: TEST.ZFS.DATA - space_type: m - size: 4 - - - name: Resize an aggregate data set to 1000 kilobytes and set no auto increase if it's shrinking. - zos_zfs_resize: - target: TEST.ZFS.DATA - size: 1000 - no_auto_increase: true - - - name: Resize an aggregate data set and get verbose output. - zos_zfs_resize: - target: TEST.ZFS.DATA - size: 2500 - verbose: true - - - name: Resize an aggregate data set and get the full trace on a file. - zos_zfs_resize: - target: TEST.ZFS.DATA - size: 2500 - trace_destination: /tmp/trace.txt - - - name: Resize an aggregate data set and capture the trace into a PDS member. - zos_zfs_resize: - target: TEST.ZFS.DATA - size: 2500 - trace_destination: "TEMP.HELPER.STORAGE(RESIZE)" - - - name: Resize an aggregate data set and capture the trace into a file with verbose output. 
- zos_zfs_resize: - target: TEST.ZFS.DATA - size: 2500 - verbose: true - trace_destination: /tmp/trace.txt - - - - -Notes ------ - -.. note:: - If needed, allocate the zFS trace output data set as a PDSE with RECFM=VB, LRECL=133 with a primary allocation of at least 50 cylinders and a secondary allocation of 30 cylinders. - - `zfsadm documentation `_. - - - - - - - -Return Values -------------- - - -cmd - The zfsadm command executed on the remote node. - - | **returned**: always - | **type**: str - | **sample**: zfsadm grow -aggregate SOMEUSER.VVV.ZFS -size 4096 - -target - The Fully Qualified Name of the resized zFS data set. - - | **returned**: always - | **type**: str - | **sample**: SOMEUSER.VVV.ZFS - -mount_target - The original share/mount of the data set. - - | **returned**: always - | **type**: str - | **sample**: /tmp/zfs_agg - -size - The desired size from option ``size`` according to ``space_type``. The resulting size can vary slightly, the actual space utilization is returned in ``new_size``. - - | **returned**: always - | **type**: int - | **sample**: 4024 - -rc - The return code of the zfsadm command. - - | **returned**: always - | **type**: int - -old_size - The original data set size according to ``space_type`` before resizing was performed. - - | **returned**: always - | **type**: float - | **sample**: - - .. code-block:: json - - 3096 - -old_free_space - The original data sets free space according to ``space_type`` before resizing was performed. - - | **returned**: always - | **type**: float - | **sample**: - - .. code-block:: json - - 2.1 - -new_size - The data set size according to ``space_type`` after resizing was performed. - - | **returned**: success - | **type**: float - | **sample**: - - .. code-block:: json - - 4032 - -new_free_space - The data sets free space according to ``space_type`` after resizing was performed. - - | **returned**: success - | **type**: float - | **sample**: - - .. code-block:: json - - 1.5 - -space_type - The measurement unit of space used to report all size values. - - | **returned**: always - | **type**: str - | **sample**: k - -stdout - The modules standard out (stdout) that is returned. - - | **returned**: always - | **type**: str - | **sample**: IOEZ00173I Aggregate TEST.ZFS.DATA.USER successfully grown. - -stderr - The modules standard error (stderr) that is returned. it may have no return value. - - | **returned**: always - | **type**: str - | **sample**: IOEZ00181E Could not open trace output dataset. - -stdout_lines - List of strings containing individual lines from standard out (stdout). - - | **returned**: always - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "IOEZ00173I Aggregate TEST.ZFS.DATA.USER successfully grown." - ] - -stderr_lines - List of strings containing individual lines from standard error (stderr). - - | **returned**: always - | **type**: list - | **sample**: - - .. code-block:: json - - [ - "IOEZ00181E Could not open trace output dataset." - ] - -verbose_output - If ``verbose=true``, the operation's full traceback will show for this property. - - | **returned**: always - | **type**: str - | **sample**: 6FB2F8 print_trace_table printing contents of table Main Trace Table... - diff --git a/docs/source/roles/job_status.rst b/docs/source/roles/job_status.rst deleted file mode 100644 index 42151c0fd3..0000000000 --- a/docs/source/roles/job_status.rst +++ /dev/null @@ -1,77 +0,0 @@ - -:github_url: https://github.com/IBM/ibm_zosmf/tree/master/plugins/roles/job_status - -.. 
_job_status_module:
-
-
-job_status -- Role that queries a job's status and whether it is running.
-==========================================================================
-
-
-.. contents::
-   :local:
-   :depth: 1
-
-
-Synopsis
---------
-- The **IBM z/OS core collection** provides an Ansible role, referred to as **job_status**, that queries a particular job with a given job_id and parses the response, returning as a msg the job status and whether the job is currently running.
-
-
-
-
-
-
-Variables
----------
-
-
-
-
-job_id
-  The job id that has been assigned to the job.
-
-  A job id must begin with `STC`, `JOB` or `TSU` and is followed by up to 5 digits.
-
-  When a job id is greater than 99,999, the job id format will begin with `S`, `J` or `T` and is followed by 7 digits.
-
-  | **required**: True
-  | **type**: str
-
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
-   - name: Query the status of job STC00001 and whether it is running
-     hosts: sampleHost
-     gather_facts: no
-     collections:
-       - ibm.ibm_zos_core
-     tasks:
-       - include_role:
-           name: job_status
-         vars:
-           job_id: STC00001
-
-
-
-
-Notes
------
-
-.. note::
-   - The role tolerates the asterisk (`*`) as a wildcard, but only retrieves information from the first returned job that matches the pattern.
-
-
-
-
-
-
-
-

From c36eb5e036cf355eb93a36a3b658191d7bc20123 Mon Sep 17 00:00:00 2001
From: Fernando Flores
Date: Tue, 11 Nov 2025 13:43:15 -0600
Subject: [PATCH 25/31] updated source

---
 docs/source/modules/zos_apf.rst                   |  332 +++
 docs/source/modules/zos_archive.rst               |  600 +++++
 docs/source/modules/zos_backup_restore.rst        |  443 ++++
 docs/source/modules/zos_blockinfile.rst           |  412 ++++
 docs/source/modules/zos_copy.rst                  | 1187 ++++++++++
 docs/source/modules/zos_data_set.rst              |  886 +++++++
 docs/source/modules/zos_encode.rst                |  335 +++
 docs/source/modules/zos_fetch.rst                 |  359 +++
 docs/source/modules/zos_find.rst                  |  398 ++++
 docs/source/modules/zos_gather_facts.rst          |  138 ++
 docs/source/modules/zos_job_output.rst            |  523 +++++
 docs/source/modules/zos_job_query.rst             |  389 +++
 docs/source/modules/zos_job_submit.rst            |  895 +++++++
 docs/source/modules/zos_lineinfile.rst            |  351 +++
 docs/source/modules/zos_mount.rst                 |  628 +++++
 docs/source/modules/zos_mvs_raw.rst               | 2083 +++++++++++++++++
 docs/source/modules/zos_operator.rst              |  212 ++
 docs/source/modules/zos_operator_action_query.rst |  259 ++
 docs/source/modules/zos_ping.rst                  |   88 +
 docs/source/modules/zos_replace.rst               |  304 +++
 docs/source/modules/zos_script.rst                |  419 ++++
 docs/source/modules/zos_started_task.rst          |  530 +++++
 docs/source/modules/zos_stat.rst                  | 1326 +++++++++++
 docs/source/modules/zos_tso_command.rst           |  171 ++
 docs/source/modules/zos_unarchive.rst             |  555 +++++
 docs/source/modules/zos_volume_init.rst           |  271 +++
 docs/source/modules/zos_zfs_resize.rst            |  314 +++
 docs/source/roles/job_status.rst                  |   77 +
 28 files changed, 14485 insertions(+)
 create mode 100644 docs/source/modules/zos_apf.rst
 create mode 100644 docs/source/modules/zos_archive.rst
 create mode 100644 docs/source/modules/zos_backup_restore.rst
 create mode 100644 docs/source/modules/zos_blockinfile.rst
 create mode 100644 docs/source/modules/zos_copy.rst
 create mode 100644 docs/source/modules/zos_data_set.rst
 create mode 100644 docs/source/modules/zos_encode.rst
 create mode 100644 docs/source/modules/zos_fetch.rst
 create mode 100644 docs/source/modules/zos_find.rst
 create mode 100644 docs/source/modules/zos_gather_facts.rst
 create mode 100644 docs/source/modules/zos_job_output.rst
 create mode 100644 docs/source/modules/zos_job_query.rst
 create mode 100644
docs/source/modules/zos_job_submit.rst create mode 100644 docs/source/modules/zos_lineinfile.rst create mode 100644 docs/source/modules/zos_mount.rst create mode 100644 docs/source/modules/zos_mvs_raw.rst create mode 100644 docs/source/modules/zos_operator.rst create mode 100644 docs/source/modules/zos_operator_action_query.rst create mode 100644 docs/source/modules/zos_ping.rst create mode 100644 docs/source/modules/zos_replace.rst create mode 100644 docs/source/modules/zos_script.rst create mode 100644 docs/source/modules/zos_started_task.rst create mode 100644 docs/source/modules/zos_stat.rst create mode 100644 docs/source/modules/zos_tso_command.rst create mode 100644 docs/source/modules/zos_unarchive.rst create mode 100644 docs/source/modules/zos_volume_init.rst create mode 100644 docs/source/modules/zos_zfs_resize.rst create mode 100644 docs/source/roles/job_status.rst diff --git a/docs/source/modules/zos_apf.rst b/docs/source/modules/zos_apf.rst new file mode 100644 index 0000000000..215de08519 --- /dev/null +++ b/docs/source/modules/zos_apf.rst @@ -0,0 +1,332 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_apf.py + +.. _zos_apf_module: + + +zos_apf -- Add or remove libraries to Authorized Program Facility (APF) +======================================================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Adds or removes libraries to Authorized Program Facility (APF). +- Manages APF statement persistent entries to a data set or data set member. +- Changes APF list format to "DYNAMIC" or "STATIC". +- Gets the current APF list entries. + + + + + +Parameters +---------- + + +library + The library name to be added or removed from the APF list. + + | **required**: False + | **type**: str + + +state + Ensure that the library is added ``state=present`` or removed ``state=absent``. + + The APF list format has to be "DYNAMIC". + + | **required**: False + | **type**: str + | **default**: present + | **choices**: absent, present + + +force_dynamic + Will force the APF list format to "DYNAMIC" before adding or removing libraries. + + If the format is "STATIC", the format will be changed to "DYNAMIC". + + | **required**: False + | **type**: bool + | **default**: False + + +volume + The identifier for the volume containing the library specified in the ``library`` parameter. The values must be one the following. + + 1. The volume serial number. + + 2. Six asterisks ``******``, indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + + 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. + + If ``volume`` is not specified, ``library`` has to be cataloged. + + | **required**: False + | **type**: str + + +sms + Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + + If ``sms=True``, ``volume`` value will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + +operation + Change APF list format to "DYNAMIC" ``operation=set_dynamic`` or "STATIC" ``operation=set_static`` + + Display APF list current format ``operation=check_format`` + + Display APF list entries when ``operation=list`` ``library``, ``volume`` and ``sms`` will be used as filters. + + If ``operation`` is not set, add or remove operation will be ignored. 
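+
+  As a brief sketch, checking the current list format before forcing a change could look like this:
+
+  .. code-block:: yaml+jinja
+
+     - name: Display whether the APF list format is DYNAMIC or STATIC
+       zos_apf:
+         operation: check_format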
+ + | **required**: False + | **type**: str + | **choices**: set_dynamic, set_static, check_format, list + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + +persistent + Add/remove persistent entries to or from *data_set_name* + + ``library`` will not be persisted or removed if ``persistent=None`` + + | **required**: False + | **type**: dict + + + data_set_name + The data set name used for persisting or removing a ``library`` from the APF list. + + | **required**: True + | **type**: str + + + marker + The marker line template. + + ``{mark}`` will be replaced with "BEGIN" and "END". + + Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. + + ``{mark}`` length may not exceed 72 characters. + + The timestamp () used in the default marker follows the '+%Y%m%d-%H%M%S' date format + + | **required**: False + | **type**: str + | **default**: /* {mark} ANSIBLE MANAGED BLOCK \*/ + + + backup + Creates a backup file or backup data set for *data_set_name*, including the timestamp information to ensure that you retrieve the original APF list defined in *data_set_name*". + + *backup_name* can be used to specify a backup file name if *backup=true*. + + The backup file name will be return on either success or failure of module execution such that data can be retrieved. + + | **required**: False + | **type**: bool + | **default**: False + + + backup_name + Specify the USS file name or data set name for the destination backup. + + If the source *data_set_name* is a USS file or path, the backup_name name must be a file or path name, and the USS file or path must be an absolute path name. + + If the source is an MVS data set, the backup_name must be an MVS data set name. + + If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + + If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it. + + | **required**: False + | **type**: str + + + +batch + A list of dictionaries for adding or removing libraries. + + This is mutually exclusive with ``library``, ``volume``, ``sms`` + + Can be used with ``persistent`` + + | **required**: False + | **type**: list + | **elements**: dict + + + library + The library name to be added or removed from the APF list. + + | **required**: True + | **type**: str + + + volume + The identifier for the volume containing the library specified on the ``library`` parameter. The values must be one of the following. + + 1. The volume serial number + + 2. Six asterisks ``******``, indicating that the system must use the volume serial number of the current system residence (SYSRES) volume. + + 3. *MCAT*, indicating that the system must use the volume serial number of the volume containing the master catalog. + + If ``volume`` is not specified, ``library`` has to be cataloged. 
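+
+    For instance (echoing the Examples section; library names and the volume serial are placeholders), each batch entry resolves its volume differently:
+
+    .. code-block:: yaml+jinja
+
+       batch:
+         - library: SOME.SEQ.DS1
+         - library: SOME.SEQ.DS2
+           sms: true
+         - library: SOME.SEQ.DS3
+           volume: T12345
+
+    Here the first entry must be cataloged, the second is SMS-managed so no volume applies, and the third is located by its volume serial.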
+ + | **required**: False + | **type**: str + + + sms + Indicates that the library specified in the ``library`` parameter is managed by the storage management subsystem (SMS), and therefore no volume is associated with the library. + + If true ``volume`` will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Add a library to the APF list + zos_apf: + library: SOME.SEQUENTIAL.DATASET + volume: T12345 + - name: Add a library (cataloged) to the APF list and persistence + zos_apf: + library: SOME.SEQUENTIAL.DATASET + force_dynamic: true + persistent: + data_set_name: SOME.PARTITIONED.DATASET(MEM) + - name: Remove a library from the APF list and persistence + zos_apf: + state: absent + library: SOME.SEQUENTIAL.DATASET + volume: T12345 + persistent: + data_set_name: SOME.PARTITIONED.DATASET(MEM) + - name: Batch libraries with custom marker, persistence for the APF list + zos_apf: + persistent: + data_set_name: "SOME.PARTITIONED.DATASET(MEM)" + marker: "/* {mark} PROG001 USR0010 */" + batch: + - library: SOME.SEQ.DS1 + - library: SOME.SEQ.DS2 + sms: true + - library: SOME.SEQ.DS3 + volume: T12345 + - name: Print the APF list matching library pattern or volume serial number + zos_apf: + operation: list + library: SOME.SEQ.* + volume: T12345 + - name: Set the APF list format to STATIC + zos_apf: + operation: set_static + + + + +Notes +----- + +.. note:: + It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF® FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + To add or delete the APF list entry for library libname, you must have UPDATE authority to the RACF® FACILITY resource class entity CSVAPF.libname, or there must be no FACILITY class profile that protects that entity. + + To change the format of the APF list to dynamic, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.DYNAMIC, or there must be no FACILITY class profile that protects that entity. + + To change the format of the APF list back to static, you must have UPDATE authority to the RACF FACILITY resource class profile CSVAPF.MVS.SETPROG.FORMAT.STATIC, or there must be no FACILITY class profile that protects that entity. + + + + + + + +Return Values +------------- + + +stdout + The stdout from ZOAU command apfadm. Output varies based on the type of operation. + + state> stdout of the executed operator command (opercmd), "SETPROG" from ZOAU command apfadm + + operation> stdout of operation options list> Returns a list of dictionaries of APF list entries [{'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFHAUTH'}, {'vol': 'PP0L6P', 'ds': 'DFH.V5R3M0.CICS.SDFJAUTH'}, ...] 
set_dynamic> Set to DYNAMIC set_static> Set to STATIC check_format> DYNAMIC or STATIC + + | **returned**: always + | **type**: str + +stderr + The error messages from ZOAU command apfadm + + | **returned**: always + | **type**: str + | **sample**: BGYSC1310E ADD Error: Dataset COMMON.LINKLIB volume COMN01 is already present in APF list. + +rc + The return code from ZOAU command apfadm + + | **returned**: always + | **type**: int + +msg + The module messages + + | **returned**: failure + | **type**: str + | **sample**: Parameter verification failed + +backup_name + Name of the backup file or data set that was created. + + | **returned**: if backup=true, always + | **type**: str + diff --git a/docs/source/modules/zos_archive.rst b/docs/source/modules/zos_archive.rst new file mode 100644 index 0000000000..2a51654019 --- /dev/null +++ b/docs/source/modules/zos_archive.rst @@ -0,0 +1,600 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_archive.py + +.. _zos_archive_module: + + +zos_archive -- Archive files and data sets on z/OS. +=================================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Create or extend an archive on a remote z/OS system. +- Sources for archiving must be on the remote z/OS system. +- Supported sources are USS (UNIX System Services) or z/OS data sets. +- The archive remains on the remote z/OS system. +- For supported archive formats, see option ``format``. + + + + + +Parameters +---------- + + +src + List of names or globs of UNIX System Services (USS) files, PS (sequential data sets), PDS, PDSE to compress or archive. + + USS file paths should be absolute paths. + + GDS relative notation is supported. + + MVS data sets supported types are: ``SEQ``, ``PDS``, ``PDSE``. + + VSAMs are not supported. + + GDS relative names are supported. e.g. *USER.GDG(-1*). + + | **required**: True + | **type**: list + | **elements**: str + + +format + The compression type and corresponding options to use when archiving data. + + | **required**: False + | **type**: dict + + + name + The compression format to use. + + | **required**: False + | **type**: str + | **default**: gz + | **choices**: bz2, gz, tar, zip, terse, xmit, pax + + + format_options + Options specific to a compression format. + + | **required**: False + | **type**: dict + + + terse_pack + Compression option for use with the terse format, *name=terse*. + + Pack will compress records in a data set so that the output results in lossless data compression. + + Spack will compress records in a data set so the output results in complex data compression. + + Spack will produce smaller output and take approximately 3 times longer than pack compression. + + | **required**: False + | **type**: str + | **choices**: pack, spack + + + xmit_log_data_set + Provide the name of a data set to store xmit log output. + + If the data set provided does not exist, the program will create it. + + If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB + + When providing the *xmit_log_data_set* name, ensure there is adequate space. + + | **required**: False + | **type**: str + + + use_adrdssu + If set to true, the ``zos_archive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to compress data sets into a portable format before using ``xmit`` or ``terse``. 
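+
+    For example (a sketch; data set names are placeholders), packaging data sets into a portable archive combines ``use_adrdssu`` with ``terse`` or ``xmit``:
+
+    .. code-block:: yaml+jinja
+
+       format:
+         name: terse
+         format_options:
+           use_adrdssu: true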
+ + | **required**: False + | **type**: bool + | **default**: False + + + + +dest + The remote absolute path or data set where the archive should be created. + + *dest* can be a USS file or MVS data set name. + + If *dest* has missing parent directories, they will be created. + + If *dest* is a nonexistent USS file, it will be created. + + If *dest* is an existing file or data set and *force=true*, the existing *dest* will be deleted and recreated with attributes defined in the *dest_data_set* option or computed by the module. + + If *dest* is an existing file or data set and *force=false* or not specified, the module exits with a note to the user. + + Destination data set attributes can be set using *dest_data_set*. + + Destination data set space will be calculated based on space of source data sets provided and/or found by expanding the pattern name. Calculating space can impact module performance. Specifying space attributes in the *dest_data_set* option will improve performance. + + GDS relative names are supported. e.g. *USER.GDG(-1*). + + | **required**: True + | **type**: str + + +exclude + Remote absolute path, glob, or list of paths, globs, data set name patterns or generation data sets (GDSs) in relative notation for the file, files or data sets to exclude from src list and glob expansion. + + Patterns (wildcards) can contain one of the following, `?`, `*`. + + * matches everything. + + ? matches any single character. + + GDS relative names are supported. e.g. *USER.GDG(-1*). + + | **required**: False + | **type**: list + | **elements**: str + + +group + Name of the group that will own the archive file. + + When left unspecified, it uses the current group of the current use unless you are root, in which case it can preserve the previous ownership. + + This option is only applicable if ``dest`` is USS, otherwise ignored. + + | **required**: False + | **type**: str + + +mode + The permission of the destination archive file. + + If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. + + The mode may also be specified as a symbolic mode (for example, 'u+rwx' or 'u=rw,g=r,o=r') or a special string 'preserve'. + + *mode=preserve* means that the file will be given the same permissions as the src file. + + | **required**: False + | **type**: str + + +owner + Name of the user that should own the archive file, as would be passed to the chown command. + + When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. + + This option is only applicable if ``dest`` is USS, otherwise ignored. + + | **required**: False + | **type**: str + + +remove + Remove any added source files , trees or data sets after module `zos_archive <./zos_archive.html>`_ adds them to the archive. Source files, trees and data sets are identified with option *src*. + + | **required**: False + | **type**: bool + | **default**: False + + +dest_data_set + Data set attributes to customize a ``dest`` data set to be archived into. 
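+
+  Since destination space is otherwise calculated from the sources, which can slow the module down, a minimal sketch that pre-sizes the destination could look like this (values are illustrative):
+
+  .. code-block:: yaml+jinja
+
+     dest_data_set:
+       name: USER.ARCHIVE.RESULT.TRS
+       type: seq
+       space_primary: 5
+       space_type: m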
+
+  | **required**: False
+  | **type**: dict
+
+
+  name
+    Desired name for the destination data set.
+
+    | **required**: False
+    | **type**: str
+
+
+  type
+    Organization of the destination.
+
+    | **required**: False
+    | **type**: str
+    | **default**: seq
+    | **choices**: seq
+
+
+  space_primary
+    If the destination *dest* data set does not exist, this sets the primary space allocated for the data set.
+
+    The unit of space used is set using *space_type*.
+
+    | **required**: False
+    | **type**: int
+
+
+  space_secondary
+    If the destination *dest* data set does not exist, this sets the secondary space allocated for the data set.
+
+    The unit of space used is set using *space_type*.
+
+    | **required**: False
+    | **type**: int
+
+
+  space_type
+    If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space.
+
+    Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``.
+
+    | **required**: False
+    | **type**: str
+    | **choices**: k, m, g, cyl, trk
+
+
+  record_format
+    If the destination data set does not exist, this sets the format of the data set. (e.g. ``fb``)
+
+    Choices are case-sensitive.
+
+    | **required**: False
+    | **type**: str
+    | **choices**: fb, vb, fba, vba, u
+
+
+  record_length
+    The length of each record in the data set, in bytes.
+
+    For variable data sets, the length must include the 4-byte prefix area.
+
+    Defaults vary depending on format: if FB/FBA, 80; if VB/VBA, 137; if U, 0.
+
+    | **required**: False
+    | **type**: int
+
+
+  block_size
+    The block size to use for the data set.
+
+    | **required**: False
+    | **type**: int
+
+
+  directory_blocks
+    The number of directory blocks to allocate to the data set.
+
+    | **required**: False
+    | **type**: int
+
+
+  sms_storage_class
+    The storage class for an SMS-managed data set.
+
+    Required for SMS-managed data sets that do not match an SMS rule.
+
+    Not valid for data sets that are not SMS-managed.
+
+    Note that all non-linear VSAM data sets are SMS-managed.
+
+    | **required**: False
+    | **type**: str
+
+
+  sms_data_class
+    The data class for an SMS-managed data set.
+
+    Optional for SMS-managed data sets that do not match an SMS rule.
+
+    Not valid for data sets that are not SMS-managed.
+
+    Note that all non-linear VSAM data sets are SMS-managed.
+
+    | **required**: False
+    | **type**: str
+
+
+  sms_management_class
+    The management class for an SMS-managed data set.
+
+    Optional for SMS-managed data sets that do not match an SMS rule.
+
+    Not valid for data sets that are not SMS-managed.
+
+    Note that all non-linear VSAM data sets are SMS-managed.
+
+    | **required**: False
+    | **type**: str
+
+
+
+tmp_hlq
+  Override the default high level qualifier (HLQ) for temporary data sets.
+
+  The default HLQ is the Ansible user used to execute the module, and if that is not available, then the environment variable value ``TMPHLQ`` is used.
+
+  | **required**: False
+  | **type**: str
+
+
+force
+  If set to ``true``, the remote file or data set ``dest`` will be deleted. Otherwise it will be created with the ``dest_data_set`` attributes or default values if ``dest_data_set`` is not specified.
+
+  If set to ``false``, the file or data set will only be copied if the destination does not exist.
+
+  If set to ``false`` and destination exists, the module exits with a note to the user.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+encoding
+  Specifies the character encoding conversion to be applied to the source files before archiving.
+ + Supported character sets rely on the charset conversion utility ``iconv`` version the most common character sets are supported. + + After conversion the files are stored in same location and name as src and the same src is taken in consideration for archive. + + Source files will be converted to the new encoding and will not be restored to their original encoding. + + If encoding fails for any file in a set of multiple files, an exception will be raised and archiving will be skipped. + + The original files in ``src`` will be converted. The module will revert the encoding conversion after a successful archive, but no backup will be created. If you need to encode using a backup and then archive take a look at `zos_encode <./zos_encode.html>`_ module. + + | **required**: False + | **type**: dict + + + from + The character set of the source *src*. + + | **required**: False + | **type**: str + + + to + The destination *dest* character set for the files to be written as. + + | **required**: False + | **type**: str + + + skip_encoding + List of names to skip encoding before archiving. This is only used if *encoding* is set, otherwise is ignored. + + | **required**: False + | **type**: list + | **elements**: str + + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + # Simple archive + - name: Archive file into a tar + zos_archive: + src: /tmp/archive/foo.txt + dest: /tmp/archive/foo_archive_test.tar + format: + name: tar + + # Archive multiple files + - name: Archive list of files into a zip + zos_archive: + src: + - /tmp/archive/foo.txt + - /tmp/archive/bar.txt + dest: /tmp/archive/foo_bar_archive_test.zip + format: + name: zip + + # Archive one data set into terse + - name: Archive data set into a terse + zos_archive: + src: "USER.ARCHIVE.TEST" + dest: "USER.ARCHIVE.RESULT.TRS" + format: + name: terse + + # Use terse with different options + - name: Archive data set into a terse, specify pack algorithm and use adrdssu + zos_archive: + src: "USER.ARCHIVE.TEST" + dest: "USER.ARCHIVE.RESULT.TRS" + format: + name: terse + format_options: + terse_pack: "spack" + use_adrdssu: true + + # Use a pattern to store + - name: Archive data set pattern using xmit + zos_archive: + src: "USER.ARCHIVE.*" + exclude_sources: "USER.ARCHIVE.EXCLUDE.*" + dest: "USER.ARCHIVE.RESULT.XMIT" + format: + name: xmit + + - name: Archive multiple GDSs into a terse + zos_archive: + src: + - "USER.GDG(0)" + - "USER.GDG(-1)" + - "USER.GDG(-2)" + dest: "USER.ARCHIVE.RESULT.TRS" + format: + name: terse + format_options: + use_adrdssu: true + + - name: Archive multiple data sets into a new GDS + zos_archive: + src: "USER.ARCHIVE.*" + dest: "USER.GDG(+1)" + format: + name: terse + format_options: + use_adrdssu: true + + - name: Encode the source data set into Latin-1 before archiving into a terse data set + zos_archive: + src: "USER.ARCHIVE.TEST" + dest: "USER.ARCHIVE.RESULT.TRS" + format: + name: terse + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Encode and archive multiple data sets but skip encoding for a few. 
+ zos_archive: + src: + - "USER.ARCHIVE1.TEST" + - "USER.ARCHIVE2.TEST" + dest: "USER.ARCHIVE.RESULT.TRS" + format: + name: terse + format_options: + use_adrdssu: true + encoding: + from: IBM-1047 + to: ISO8859-1 + skip_encoding: + - "USER.ARCHIVE2.TEST" + + + + +Notes +----- + +.. note:: + This module does not perform a send or transmit operation to a remote node. If you want to transport the archive you can use zos_fetch to retrieve to the controller and then zos_copy or zos_unarchive for copying to a remote or send to the remote and then unpack the archive respectively. + + When packing and using ``use_adrdssu`` flag the module will take up to two times the space indicated in ``dest_data_set``. + + tar, zip, bz2 and pax are archived using python ``tarfile`` library which uses the latest version available for each format, for compatibility when opening from system make sure to use the latest available version for the intended format. + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_fetch_module` + - :ref:`zos_unarchive_module` + + + + +Return Values +------------- + + +state + The state of the input ``src``. + + ``absent`` when the source files or data sets were removed. + + ``present`` when the source files or data sets were not removed. + + ``incomplete`` when ``remove`` was true and the source files or data sets were not removed. + + | **returned**: always + | **type**: str + +dest_state + The state of the *dest* file or data set. + + ``absent`` when the file does not exist. + + ``archive`` when the file is an archive. + + ``compress`` when the file is compressed, but not an archive. + + ``incomplete`` when the file is an archive, but some files under *src* were not found. + + | **returned**: success + | **type**: str + +missing + Any files or data sets that were missing from the source. + + | **returned**: success + | **type**: list + +archived + Any files or data sets that were compressed or added to the archive. + + | **returned**: success + | **type**: list + +arcroot + If ``src`` is a list of USS files, this returns the top most parent folder of the list of files, otherwise is empty. + + | **returned**: always + | **type**: str + +expanded_sources + The list of matching paths from the src option. + + | **returned**: always + | **type**: list + +expanded_exclude_sources + The list of matching exclude paths from the exclude option. + + | **returned**: always + | **type**: list + +encoded + List of files or data sets that were successfully encoded. + + | **returned**: success + | **type**: list + +failed_on_encoding + List of files or data sets that were failed while encoding. + + | **returned**: success + | **type**: list + +skipped_encoding_targets + List of files or data sets that were skipped while encoding. + + | **returned**: success + | **type**: list + diff --git a/docs/source/modules/zos_backup_restore.rst b/docs/source/modules/zos_backup_restore.rst new file mode 100644 index 0000000000..f335ca632c --- /dev/null +++ b/docs/source/modules/zos_backup_restore.rst @@ -0,0 +1,443 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_backup_restore.py + +.. _zos_backup_restore_module: + + +zos_backup_restore -- Backup and restore data sets and volumes +============================================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Create and restore from backups of data sets and volumes. 
+- Data set backups are performed using logical dumps, volume backups are performed using physical dumps. +- Backups are compressed using AMATERSE. +- Backups are created by first dumping data sets with ADRDSSU, followed by compression with AMATERSE. +- Restoration is performed by first decompressing an archive with AMATERSE, then restoring with ADRDSSU. +- Since ADRDSSU and AMATERSE are used to create and restore backups, backups can be restored to systems where Ansible and ZOAU are not available. Conversely, dumps created with ADRDSSU and AMATERSE can be restored using this module. + + + + + +Parameters +---------- + + +operation + Used to specify the operation to perform. + + | **required**: True + | **type**: str + | **choices**: backup, restore + + +data_sets + Determines which data sets to include in the backup. + + | **required**: False + | **type**: dict + + + include + When *operation=backup*, specifies a list of data sets or data set patterns to include in the backup. + + When *operation=backup* GDS relative names are supported. + + When *operation=restore*, specifies a list of data sets or data set patterns to include when restoring from a backup. + + The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate to DFSMSdss that only part of a qualifier has been specified. + + When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. + + Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. + + A question mark ``?`` or percent sign ``%`` matches a single character. + + | **required**: False + | **type**: raw + + + exclude + When *operation=backup*, specifies a list of data sets or data set patterns to exclude from the backup. + + When *operation=backup* GDS relative names are supported. + + When *operation=restore*, specifies a list of data sets or data set patterns to exclude when restoring from a backup. + + The single asterisk, ``*``, is used in place of exactly one qualifier. In addition, it can be used to indicate that only part of a qualifier has been specified." + + When used with other qualifiers, the double asterisk, ``**``, indicates either the nonexistence of leading, trailing, or middle qualifiers, or the fact that they play no role in the selection process. + + Two asterisks are the maximum permissible in a qualifier. If there are two asterisks in a qualifier, they must be the first and last characters. + + A question mark ``?`` or percent sign ``%`` matches a single character. + + | **required**: False + | **type**: raw + + + +volume + This applies to both data set restores and volume restores. + + When *operation=backup* and *data_sets* are provided, specifies the volume that contains the data sets to backup. + + When *operation=restore*, specifies the volume the backup should be restored to. + + *volume* is required when restoring a full volume backup. + + | **required**: False + | **type**: str + + +full_volume + When *operation=backup* and *full_volume=True*, specifies that the entire volume provided to *volume* should be backed up. + + When *operation=restore* and *full_volume=True*, specifies that the volume should be restored (default is dataset). + + *volume* must be provided when *full_volume=True*. 
+ + | **required**: False + | **type**: bool + | **default**: False + + +temp_volume + Specifies a particular volume on which the temporary data sets should be created during the backup and restore process. + + When *operation=backup* and *backup_name* is a data set, specifies the volume the backup should be placed in. + + | **required**: False + | **type**: str + + +backup_name + When *operation=backup*, the destination data set or UNIX file to hold the backup. + + When *operation=restore*, the destination data set or UNIX file backup to restore. + + There are no enforced conventions for backup names. However, using a common extension like ``.dzp`` for UNIX files and ``.DZP`` for data sets will improve readability. + + GDS relative names are supported when *operation=restore*. + + | **required**: True + | **type**: str + + +recover + When *recover=true* and *operation=backup* then potentially recoverable errors will be ignored. + + | **required**: False + | **type**: bool + | **default**: False + + +overwrite + When *operation=backup*, specifies if an existing data set or UNIX file matching *backup_name* should be deleted. + + When *operation=restore*, specifies if the module should overwrite existing data sets with matching name on the target device. + + | **required**: False + | **type**: bool + | **default**: False + + +compress + When *operation=backup*, enables compression of partitioned data sets using system-level compression features. If supported, this may utilize zEDC hardware compression. + + This option can reduce the size of the temporary dataset generated during backup operations either before the AMATERSE step when *terse* is True or the resulting backup when *terse* is False. + + | **required**: False + | **type**: bool + | **default**: False + + +terse + When *operation=backup*, executes an AMATERSE step to compress and pack the temporary data set for the backup. This creates a backup with a format suitable for transferring off-platform. + + If *operation=backup* and if *dataset=False* then option *terse* must be True. + + | **required**: False + | **type**: bool + | **default**: True + + +sms_storage_class + When *operation=restore*, specifies the storage class to use. The storage class will also be used for temporary data sets created during restore process. + + When *operation=backup*, specifies the storage class to use for temporary data sets created during backup process. + + If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. + + | **required**: False + | **type**: str + + +sms_management_class + When *operation=restore*, specifies the management class to use. The management class will also be used for temporary data sets created during restore process. + + When *operation=backup*, specifies the management class to use for temporary data sets created during backup process. + + If neither of *sms_storage_class* or *sms_management_class* are specified, the z/OS system's Automatic Class Selection (ACS) routines will be used. + + | **required**: False + | **type**: str + + +space + If *operation=backup*, specifies the amount of space to allocate for the backup. Please note that even when backing up to a UNIX file, backup contents will be temporarily held in a data set. + + If *operation=restore*, specifies the amount of space to allocate for data sets temporarily created during the restore process. + + The unit of space used is set using *space_type*. 
+ + When *full_volume=True*, *space* defaults to ``1``, otherwise default is ``25`` + + | **required**: False + | **type**: int + + +space_type + The unit of measurement to use when defining data set space. + + Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. + + When *full_volume=True*, *space_type* defaults to ``g``, otherwise default is ``m`` + + | **required**: False + | **type**: str + | **default**: m + | **choices**: k, m, g, cyl, trk + + +hlq + Specifies the new HLQ to use for the data sets being restored. + + If no value is provided, the data sets will be restored with their original HLQs. + + | **required**: False + | **type**: str + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary data sets used in the module's operation. + + If *tmp_hlq* is set, this value will be applied to all temporary data sets. + + If *tmp_hlq* is not set, the value will be the username who submits the ansible task, this is the default behavior. If the username can not be identified, the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Backup all data sets matching the pattern USER.** to data set MY.BACKUP.DZP + zos_backup_restore: + operation: backup + data_sets: + include: user.** + backup_name: MY.BACKUP.DZP + + - name: Backup all data sets matching the patterns USER.** or PRIVATE.TEST.* + excluding data sets matching the pattern USER.PRIVATE.* to data set MY.BACKUP.DZP + zos_backup_restore: + operation: backup + data_sets: + include: + - user.** + - private.test.* + exclude: user.private.* + backup_name: MY.BACKUP.DZP + + - name: Backup a list of GDDs to data set my.backup.dzp + zos_backup_restore: + operation: backup + data_sets: + include: + - user.gdg(-1) + - user.gdg(0) + backup_name: my.backup.dzp + + - name: Backup datasets using compress + zos_backup_restore: + operation: backup + compress: true + terse: true + data_sets: + include: someds.name.here + backup_name: my.backup.dzp + + - name: Backup all datasets matching the pattern USER.** to UNIX file /tmp/temp_backup.dzp, ignore recoverable errors. + zos_backup_restore: + operation: backup + data_sets: + include: user.** + backup_name: /tmp/temp_backup.dzp + recover: true + + - name: Backup all datasets matching the pattern USER.** to data set MY.BACKUP.DZP, + allocate 100MB for data sets used in backup process. + zos_backup_restore: + operation: backup + data_sets: + include: user.** + backup_name: MY.BACKUP.DZP + space: 100 + space_type: m + + - name: + Backup all datasets matching the pattern USER.** that are present on the volume MYVOL1 to data set MY.BACKUP.DZP, + allocate 100MB for data sets used in the backup process. + zos_backup_restore: + operation: backup + data_sets: + include: user.** + volume: MYVOL1 + backup_name: MY.BACKUP.DZP + space: 100 + space_type: m + + - name: Backup an entire volume, MYVOL1, to the UNIX file /tmp/temp_backup.dzp, + allocate 1GB for data sets used in backup process. 
+ zos_backup_restore: + operation: backup + backup_name: /tmp/temp_backup.dzp + volume: MYVOL1 + full_volume: true + space: 1 + space_type: g + + - name: Restore data sets from a backup stored in the UNIX file /tmp/temp_backup.dzp. + Restore the data sets with the original high level qualifiers. + zos_backup_restore: + operation: restore + backup_name: /tmp/temp_backup.dzp + + - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. + Only restore data sets whose last, or only qualifier is TEST. + Use MYHLQ as the new HLQ for restored data sets. + zos_backup_restore: + operation: restore + data_sets: + include: "**.TEST" + backup_name: /tmp/temp_backup.dzp + hlq: MYHLQ + + - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. + Only restore data sets whose last, or only qualifier is TEST. + Use MYHLQ as the new HLQ for restored data sets. Restore data sets to volume MYVOL2. + zos_backup_restore: + operation: restore + data_sets: + include: "**.TEST" + volume: MYVOL2 + backup_name: /tmp/temp_backup.dzp + hlq: MYHLQ + + - name: Restore data sets from backup stored in the data set MY.BACKUP.DZP. + Use MYHLQ as the new HLQ for restored data sets. + zos_backup_restore: + operation: restore + backup_name: MY.BACKUP.DZP + hlq: MYHLQ + + - name: Restore volume from backup stored in the data set MY.BACKUP.DZP. + Restore to volume MYVOL2. + zos_backup_restore: + operation: restore + volume: MYVOL2 + full_volume: true + backup_name: MY.BACKUP.DZP + space: 1 + space_type: g + + - name: Restore data sets from backup stored in the UNIX file /tmp/temp_backup.dzp. + Specify DB2SMS10 for the SMS storage and management classes to use for the restored + data sets. + zos_backup_restore: + operation: restore + volume: MYVOL2 + backup_name: /tmp/temp_backup.dzp + sms_storage_class: DB2SMS10 + sms_management_class: DB2SMS10 + + + + +Notes +----- + +.. note:: + It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF FACILITY resource class. A user is described as the remote user, configured to run either the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + When using this module, if the RACF FACILITY class profile **STGADMIN.ADR.DUMP.TOLERATE.ENQF** is active, you must have READ access authority to use the module option *recover=true*. If the RACF FACILITY class checking is not set up, any user can use the module option without access to the class. + + If your system uses a different security product, consult that product's documentation to configure the required security classes. + + + + + + + +Return Values +------------- + + +changed + Indicates if the operation made changes. + + ``true`` when backup/restore was successful, ``false`` otherwise. + + | **returned**: always + | **type**: bool + | **sample**: + + .. code-block:: json + + true + +backup_name + The USS file name or data set name that was used as a backup. + + Matches the *backup_name* parameter provided as input. + + | **returned**: always + | **type**: str + | **sample**: /u/oeusr03/my_backup.dzp + +message + Returns any important messages about the modules execution, if any. 
+ + | **returned**: always + | **type**: str + diff --git a/docs/source/modules/zos_blockinfile.rst b/docs/source/modules/zos_blockinfile.rst new file mode 100644 index 0000000000..041182ca10 --- /dev/null +++ b/docs/source/modules/zos_blockinfile.rst @@ -0,0 +1,412 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_blockinfile.py + +.. _zos_blockinfile_module: + + +zos_blockinfile -- Manage block of multi-line textual data on z/OS +================================================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Manage block of multi-lines in z/OS UNIX System Services (USS) files, PS (sequential data set), PDS, PDSE, or member of a PDS or PDSE. +- This module ensures a particular block of multi-line text surrounded by customizable marker lines is present in a USS file or data set, or replaces an existing block identified by the markers. +- This is primarily useful when you want to change a block of multi-line text in a USS file or data set. + + + + + +Parameters +---------- + + +src + The location can be a UNIX System Services (USS) file, PS (sequential data set), member of a PDS or PDSE, PDS, PDSE. + + The USS file must be an absolute pathname. + + Generation data set (GDS) relative name of generation already created. e.g. *SOME.CREATION(-1*). + + | **required**: True + | **type**: str + + +state + Whether the block should be inserted or replaced using *state=present*. + + Whether the block should be removed using *state=absent*. + + | **required**: False + | **type**: str + | **default**: present + | **choices**: absent, present + + +marker + The marker line template. + + ``{mark}`` will be replaced with the values ``in marker_begin`` (default="BEGIN") and ``marker_end`` (default="END"). + + Using a custom marker without the ``{mark}`` variable may result in the block being repeatedly inserted on subsequent playbook runs. + + | **required**: False + | **type**: str + | **default**: # {mark} ANSIBLE MANAGED BLOCK + + +block + The text to insert inside the marker lines. + + Multi-line can be separated by '\n'. + + Any double-quotation marks will be removed. + + | **required**: False + | **type**: str + + +insertafter + If specified, the block will be inserted after the last match of the specified regular expression. + + A special value ``EOF`` for inserting a block at the end of the file is available. + + If a specified regular expression has no matches, ``EOF`` will be used instead. + + Choices are EOF or '*regex*'. + + Default is EOF. + + | **required**: False + | **type**: str + + +insertbefore + If specified, the block will be inserted before the last match of specified regular expression. + + A special value ``BOF`` for inserting the block at the beginning of the file is available. + + If a specified regular expression has no matches, the block will be inserted at the end of the file. + + Choices are BOF or '*regex*'. + + | **required**: False + | **type**: str + + +marker_begin + This will be inserted at ``{mark}`` in the opening ansible block marker. + + Value needs to be different from *marker_end*. + + | **required**: False + | **type**: str + | **default**: BEGIN + + +marker_end + This will be inserted at ``{mark}`` in the closing ansible block marker. + + Value must be different from *marker_begin*. + + | **required**: False + | **type**: str + | **default**: END + + +backup + Specifies whether a backup of destination should be created before editing the source *src*. 
+
+  When set to ``true``, the module creates a backup file or data set.
+
+  The backup file name will be returned on either success or failure of module execution such that data can be retrieved.
+
+  Use a generation data set (GDS) relative positive name. e.g. *SOME.CREATION(+1*).
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+backup_name
+  Specify the USS file name or data set name for the destination backup.
+
+  If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name.
+
+  If the source is an MVS data set, the backup_name must be an MVS data set name, and the data set must not be preallocated.
+
+  If the backup_name is not provided, a default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``.
+
+  If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set recovery can be done by renaming it.
+
+  If *src* is a data set member and backup_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name.
+
+  | **required**: False
+  | **type**: str
+
+
+tmp_hlq
+  Override the default high level qualifier (HLQ) for temporary and backup data sets.
+
+  The default HLQ is the Ansible user used to execute the module, and if that is not available, then the value ``TMPHLQ`` is used.
+
+  | **required**: False
+  | **type**: str
+
+
+encoding
+  The character set of the source *src*. `zos_blockinfile <./zos_blockinfile.html>`_ requires it to be provided with correct encoding to read the content of a USS file or data set. If this parameter is not provided, this module assumes that the USS file or data set is encoded in IBM-1047.
+
+  Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported.
+
+  | **required**: False
+  | **type**: str
+  | **default**: IBM-1047
+
+
+force
+  Specifies that the data set can be shared with others during an update, which means the data set you are updating can be simultaneously updated by others.
+
+  This is helpful when a data set is being used in a long running process such as a started task and you want to update or read it.
+
+  The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+indentation
+  Defines the number of spaces to prepend to every line of the block.
+
+  | **required**: False
+  | **type**: int
+  | **default**: 0
+
+
+
+
+Attributes
+----------
+action
+  | **support**: none
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: none
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Insert/Update new mount point
+     zos_blockinfile:
+       src: SYS1.PARMLIB(BPXPRM00)
+       marker: "/* {mark} ANSIBLE MANAGED BLOCK */"
+       block: |
+         MOUNT FILESYSTEM('SOME.DATA.SET') TYPE(ZFS) MODE(READ)
+         MOUNTPOINT('/tmp/src/somedirectory')
+   - name: Remove a library as well as surrounding markers
+     zos_blockinfile:
+       state: absent
+       src: SYS1.PARMLIB(PROG00)
+       marker: "/* {mark} ANSIBLE MANAGED BLOCK FOR SOME.DATA.SET */"
+   - name: Add ZOAU path to PATH in /etc/profile
+     zos_blockinfile:
+       src: /etc/profile
+       insertafter: "PATH="
+       block: |
+         ZOAU=/path/to/zoau_dir/bin
+         export ZOAU
+         PATH=$ZOAU:$PATH
+   - name: Insert/Update HTML surrounded by custom markers after <body> line
+     zos_blockinfile:
+       path: /var/www/html/index.html
+       marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+       insertafter: "<body>"
+       block: |
+         <h1>Welcome to {{ ansible_hostname }}</h1>
+         <p>Last updated on {{ ansible_date_time.iso8601 }}</p>
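+   # Added illustration, not part of the module's generated examples: a
+   # hypothetical task using the documented `encoding` option to declare
+   # the character set of the target file before the block is inserted.
+   # The profile path and export values are placeholders.
+   - name: Append exports to an ISO8859-1 tagged profile, declaring its encoding
+     zos_blockinfile:
+       src: /u/ibmuser/.profile
+       encoding: ISO8859-1
+       insertafter: EOF
+       block: |
+         export ZOAU=/usr/lpp/zoau
+         export PATH=$ZOAU/bin:$PATH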
+   - name: Remove HTML as well as surrounding markers
+     zos_blockinfile:
+       path: /var/www/html/index.html
+       state: absent
+       marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+   - name: Add mappings to /etc/hosts
+     zos_blockinfile:
+       path: /etc/hosts
+       block: |
+         {{ item.ip }} {{ item.name }}
+       marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
+       loop:
+         - { name: host1, ip: 10.10.1.10 }
+         - { name: host2, ip: 10.10.1.11 }
+         - { name: host3, ip: 10.10.1.12 }
+   - name: Add a code block to a member using a predefined indentation.
+     zos_blockinfile:
+       path: SYS1.PARMLIB(BPXPRM00)
+       block: |
+         DSN SYSTEM({{ DB2SSID }})
+         RUN PROGRAM(DSNTEP2) PLAN(DSNTEP12) -
+         LIB('{{ DB2RUN }}.RUNLIB.LOAD')
+       indentation: 16
+
+   - name: Update a script with commands containing quotes.
+     zos_blockinfile:
+       src: "/u/scripts/script.sh"
+       insertafter: "EOF"
+       block: |
+         cat "//'{{ DS_NAME }}'"
+         cat "//'{{ DS_NAME_2 }}'"
+
+   - name: Set facts for the following two tasks.
+     set_fact:
+       HLQ: 'ANSIBLE'
+       MLQ: 'MEMBER'
+       LLQ: 'TEST'
+       MEM: '(JCL)'
+       MSG: 'your first JCL program'
+       CONTENT: "{{ lookup('file', 'files/content.txt') }}"
+
+   - name: Update JCL in a PDS member with Jinja2 variable syntax.
+     zos_blockinfile:
+       src: "{{ HLQ }}.{{MLQ}}.{{LLQ}}{{MEM}}"
+       insertafter: "HELLO, WORLD"
+       marker: "//* {mark} *//"
+       marker_begin: "Begin Ansible Block Insertion 1"
+       marker_end: "End Ansible Block Insertion 1"
+       state: present
+       block: |
+         This is {{ MSG }}, and it's now
+         managed by Ansible.
+
+   - name: Update JCL in PDS member with content from a file.
+     zos_blockinfile:
+       src: "{{ HLQ }}.{{MLQ}}.{{LLQ}}{{MEM}}"
+       insertafter: "End Ansible Block Insertion 1"
+       marker: "//* {mark} *//"
+       marker_begin: "Begin Ansible Block Insertion 2"
+       marker_end: "End Ansible Block Insertion 2"
+       block: "{{ CONTENT }}"
+
+   - name: Add a block to a GDS
+     zos_blockinfile:
+       src: TEST.SOME.CREATION(0)
+       insertafter: EOF
+       block: "{{ CONTENT }}"
+
+   - name: Add a block to a data set and back it up in a new generation of a GDS
+     zos_blockinfile:
+       src: SOME.CREATION.TEST
+       insertbefore: BOF
+       backup: true
+       backup_name: CREATION.GDS(+1)
+       block: "{{ CONTENT }}"
+
+
+
+
+Notes
+-----
+
+.. note::
+   It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user.
+
+   All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets.
+
+   For supported character sets used to encode data, refer to the `documentation `_.
+
+   When using ``with_*`` loops, be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+
+   When more than one block should be handled in a file, you must change the *marker* per task.
+
+   When working with a backup of a sequential data set, the backup name should also be a sequential data set. This avoids false positives and error conditions during the backup.
+
+
+
+See Also
+--------
+
+.. seealso::
+
+   - :ref:`zos_data_set_module`
+
+
+
+
+Return Values
+-------------
+
+
+changed
+  Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`.
+
+  | **returned**: success
+  | **type**: bool
+  | **sample**:
+
+  ..
code-block:: json + + 1 + +found + Number of the matching patterns + + | **returned**: success + | **type**: int + | **sample**: 5 + +cmd + Constructed ZOAU dmod shell command based on the parameters + + | **returned**: success + | **type**: str + | **sample**: dmod -d -b -c IBM-1047 -m "BEGIN\nEND\n# {mark} ANSIBLE MANAGED BLOCK" -e "$ a\\PATH=/dir/bin:$PATH" /etc/profile + +msg + The module messages + + | **returned**: failure + | **type**: str + | **sample**: Parameter verification failed + +stdout + The stdout from ZOAU dmod when json.loads() fails to parse the result from dmod + + | **returned**: failure + | **type**: str + +stderr + The error messages from ZOAU dmod + + | **returned**: failure + | **type**: str + | **sample**: BGYSC1311E Iconv error, cannot open converter from ISO-88955-1 to IBM-1047 + +rc + The return code from ZOAU dmod when json.loads() fails to parse the result from dmod + + | **returned**: failure + | **type**: bool + +backup_name + Name of the backup file or data set that was created. + + | **returned**: if backup=true, always + | **type**: str + | **sample**: /path/to/file.txt.2015-02-03@04:15~ + diff --git a/docs/source/modules/zos_copy.rst b/docs/source/modules/zos_copy.rst new file mode 100644 index 0000000000..5fe5e565f5 --- /dev/null +++ b/docs/source/modules/zos_copy.rst @@ -0,0 +1,1187 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_copy.py + +.. _zos_copy_module: + + +zos_copy -- Copy data to z/OS +============================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The `zos_copy <./zos_copy.html>`_ module copies a file or data set from a local or a remote machine to a location on the remote machine. + + + + + +Parameters +---------- + + +asa_text + If set to ``true``, indicates that either ``src`` or ``dest`` or both contain ASA control characters. + + When ``src`` is a USS file and ``dest`` is a data set, the copy will preserve ASA control characters in the destination. + + When ``src`` is a data set containing ASA control characters and ``dest`` is a USS file, the copy will put all control characters as plain text in the destination. + + If ``dest`` is a non-existent data set, it will be created with record format Fixed Block with ANSI format (FBA). + + If neither ``src`` or ``dest`` have record format Fixed Block with ANSI format (FBA) or Variable Block with ANSI format (VBA), the module will fail. + + This option is only valid for text files. If ``is_binary`` is ``true`` or ``executable`` is ``true`` as well, the module will fail. + + | **required**: False + | **type**: bool + | **default**: False + + +identical_gdg_copy + If set to ``true``, and the destination GDG does not exist, the module will copy the source GDG to the destination GDG with identical GDS absolute names. + + If set to ``false``, the copy will be done as a normal copy, without preserving the source GDG absolute names. + + | **required**: False + | **type**: bool + | **default**: False + + +backup + Specifies whether a backup of the destination should be created before copying data. + + When set to ``true``, the module creates a backup file or data set. + + The backup file name will be returned on either success or failure of module execution such that data can be retrieved. + + | **required**: False + | **type**: bool + | **default**: False + + +backup_name + Specify a unique USS file name or data set name for the destination backup. 
+ + If the destination ``dest`` is a USS file or path, the ``backup_name`` must be an absolute path name. + + If the destination is an MVS data set name, the ``backup_name`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. + + If the ``backup_name`` is not provided, the default ``backup_name`` will be used. If the ``dest`` is a USS file or USS path, the name of the backup file will be the destination file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. If the ``dest`` is an MVS data set, it will be a data set with a randomly generated name. + + If ``dest`` is a data set member and ``backup_name`` is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. + + If *backup_name* is a generation data set (GDS), it must be a relative positive name (for example, V(HLQ.USER.GDG(+1\))). + + | **required**: False + | **type**: str + + +content + When used instead of ``src``, sets the contents of a file or data set directly to the specified value. + + Works only when ``dest`` is a USS file, sequential data set, or a partitioned data set member. + + If ``dest`` is a directory, then content will be copied to ``/path/to/dest/inline_copy``. + + | **required**: False + | **type**: str + + +dest + The remote absolute path or data set where the content should be copied to. + + ``dest`` can be a USS file, directory or MVS data set name. + + ``dest`` can be a alias name of a PS, PDS or PDSE data set. + + If ``dest`` has missing parent directories, they will be created. + + If ``dest`` is a nonexistent USS file, it will be created. + + If ``dest`` is a new USS file or replacement, the file will be appropriately tagged with either the system's default locale or the encoding option defined. If the USS file is a replacement, the user must have write authority to the file either through ownership, group or other permissions, else the module will fail. + + If ``dest`` is a nonexistent data set, it will be created following the process outlined here and in the ``volume`` option. + + If ``dest`` is a nonexistent data set, the attributes assigned will depend on the type of ``src``. If ``src`` is a USS file, ``dest`` will have a Fixed Block (FB) record format and the remaining attributes will be computed. If *is_binary=true*, ``dest`` will have a Fixed Block (FB) record format with a record length of 80, block size of 32720, and the remaining attributes will be computed. If *executable=true*,``dest`` will have an Undefined (U) record format with a record length of 0, block size of 32760, and the remaining attributes will be computed. + + If ``src`` is a file and ``dest`` a partitioned data set, ``dest`` does not need to include a member in its value, the module can automatically compute the resulting member name from ``src``. + + When ``dest`` is a data set, precedence rules apply. If ``dest_data_set`` is set, this will take precedence over an existing data set. If ``dest`` is an empty data set, the empty data set will be written with the expectation its attributes satisfy the copy. Lastly, if no precendent rule has been exercised, ``dest`` will be created with the same attributes of ``src``. + + When the ``dest`` is an existing VSAM (KSDS) or VSAM (ESDS), then source can be an ESDS, a KSDS or an RRDS. 
The VSAM (KSDS) or VSAM (ESDS) ``dest`` will be deleted and recreated following the process outlined in the ``volume`` option. + + When the ``dest`` is an existing VSAM (RRDS), then the source must be an RRDS. The VSAM (RRDS) will be deleted and recreated following the process outlined in the ``volume`` option. + + When ``dest`` is and existing VSAM (LDS), then source must be an LDS. The VSAM (LDS) will be deleted and recreated following the process outlined in the ``volume`` option. + + ``dest`` can be a previously allocated generation data set (GDS) or a new GDS. + + When ``dest`` is a generation data group (GDG), ``src`` must be a GDG too. The copy will allocate successive new generations in ``dest``, the module will verify it has enough available generations before starting the copy operations. + + When ``dest`` is a data set, you can override storage management rules by specifying ``volume`` if the storage class being used has GUARANTEED_SPACE=YES specified, otherwise, the allocation will fail. See ``volume`` for more volume related processes. + + | **required**: True + | **type**: str + + +encoding + Specifies which encodings the destination file or data set should be converted from and to. + + If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. Note that this is only done for text data and not binary data. + + Only valid if ``is_binary`` is false. + + | **required**: False + | **type**: dict + + + from + The encoding to be converted from + + | **required**: True + | **type**: str + + + to + The encoding to be converted to + + | **required**: False + | **type**: str + + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + +force + If set to ``true`` and the remote file or data set ``dest`` is empty, the ``dest`` will be reused. + + If set to ``true`` and the remote file or data set ``dest`` is NOT empty, the ``dest`` will be deleted and recreated with the ``src`` data set attributes, otherwise it will be recreated with the ``dest`` data set attributes. + + To backup data before any deletion, see parameters ``backup`` and ``backup_name``. + + If set to ``false``, the file or data set will only be copied if the destination does not exist. + + If set to ``false`` and destination exists, the module exits with a note to the user. + + | **required**: False + | **type**: bool + | **default**: False + + +force_lock + By default, when ``dest`` is a MVS data set and is being used by another process with DISP=SHR or DISP=OLD the module will fail. Use ``force_lock`` to bypass DISP=SHR and continue with the copy operation. + + If set to ``true`` and destination is a MVS data set opened by another process then zos_copy will try to copy using DISP=SHR. + + Using ``force_lock`` uses operations that are subject to race conditions and can lead to data loss, use with caution. + + If a data set member has aliases, and is not a program object, copying that member to a dataset that is in use will result in the aliases not being preserved in the target dataset. When this scenario occurs the module will fail. + + | **required**: False + | **type**: bool + | **default**: False + + +ignore_sftp_stderr + During data transfer through SFTP, the SFTP command directs content to stderr. 
By default, the module essentially ignores the stderr stream produced by SFTP and continues execution. The user is able to override this behavior by setting this parameter to ``false``. By doing so, any content written to stderr is considered an error by Ansible and will cause the module to fail. + + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. + + | **required**: False + | **type**: bool + | **default**: True + + +is_binary + If set to ``true``, indicates that the file or data set to be copied is a binary file or data set. + + When *is_binary=true*, no encoding conversion is applied to the content, all content transferred retains the original state. + + Use *is_binary=true* when copying a Database Request Module (DBRM) to retain the original state of the serialized SQL statements of a program. + + | **required**: False + | **type**: bool + | **default**: False + + +executable + If set to ``true``, indicates that the file or library to be copied is an executable. + + If *executable=true*, and ``dest`` is a data set, it must be a PDS or PDSE (library). + + If ``dest`` is a nonexistent data set, the library attributes assigned will be Undefined (U) record format with a record length of 0, block size of 32760 and the remaining attributes will be computed. + + If ``dest`` is a file, execute permission for the user will be added to the file (``u+x``). + + If the ``src`` executable has an alias, the alias will not be copied unless ``aliases=true``. + + | **required**: False + | **type**: bool + | **default**: False + + +aliases + If set to ``true``, indicates that any aliases found in the source (USS file, USS dir, PDS/E library or member) are to be preserved during the copy operation. + + Aliases are implicitly preserved when libraries are copied over to USS destinations. That is, when ``executable=True`` and ``dest`` is a USS file or directory, this option will be ignored. + + Copying of aliases for text-based data sets from USS sources or to USS destinations is not currently supported. + + If the ``dest`` is Unix, the alias is not visible in Unix, even though the information is there and will be visible if copied to a library. + + | **required**: False + | **type**: bool + | **default**: False + + +local_follow + This flag indicates that any existing filesystem links in the source tree should be followed. + + | **required**: False + | **type**: bool + | **default**: True + + +group + Name of the group that will own the file system objects. + + When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership. + + This option is only applicable if ``dest`` is USS, otherwise ignored. + + | **required**: False + | **type**: str + + +mode + The permission of the destination file or directory. + + If ``dest`` is USS, this will act as Unix file mode, otherwise ignored. + + It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``)or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results. 
+ + The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string `preserve`. + + *mode=preserve* means that the file will be given the same permissions as the source file. + + | **required**: False + | **type**: str + + +owner + Name of the user that should own the filesystem object, as would be passed to the chown command. + + When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership. + + This option is only applicable if ``dest`` is USS, otherwise ignored. + + | **required**: False + | **type**: str + + +remote_src + If set to ``false``, the module searches for ``src`` at the local machine. + + If set to ``true``, the module goes to the remote/target machine for ``src``. + + | **required**: False + | **type**: bool + | **default**: False + + +src + Path to a file/directory or name of a data set to copy to remote z/OS system. + + ``src`` can be a alias name of a PS, PDS or PDSE data set. + + If ``remote_src`` is true, then ``src`` must be the path to a Unix System Services (USS) file, name of a data set, or data set member. + + If ``src`` is a local path or a USS path, it can be absolute or relative. + + If ``src`` is a directory, ``dest`` must be a partitioned data set or a USS directory. + + If ``src`` is a file and ``dest`` ends with "/" or is a directory, the file is copied to the directory with the same filename as ``src``. + + If ``src`` is a directory and ends with "/", the contents of it will be copied into the root of ``dest``. If it doesn't end with "/", the directory itself will be copied. + + If ``src`` is a directory or a file, file names will be truncated and/or modified to ensure a valid name for a data set or member. + + If ``src`` is a VSAM data set, ``dest`` must also be a VSAM. + + If ``src`` is a generation data set (GDS), it must be a previously allocated one. + + If ``src`` is a generation data group (GDG), ``dest`` can be another GDG or a USS directory. + + Wildcards can be used to copy multiple PDS/PDSE members to another PDS/PDSE. i.e. Using SOME.TEST.PDS(*) will copy all members from one PDS/E to another without removing the destination PDS/E. + + Required unless using ``content``. + + | **required**: False + | **type**: str + + +validate + Specifies whether to perform checksum validation for source and destination files. + + Valid only for USS destination, otherwise ignored. + + | **required**: False + | **type**: bool + | **default**: False + + +volume + If ``dest`` does not exist, specify which volume ``dest`` should be allocated to. + + Only valid when the destination is an MVS data set. + + The volume must already be present on the device. + + If no volume is specified, storage management rules will be used to determine the volume where ``dest`` will be allocated. + + If the storage administrator has specified a system default unit name and you do not set a ``volume`` name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. + + | **required**: False + | **type**: str + + +dest_data_set + Data set attributes to customize a ``dest`` data set to be copied into. + + Some attributes only apply when ``dest`` is a generation data group (GDG). 
+ + | **required**: False + | **type**: dict + + + type + Organization of the destination + + | **required**: True + | **type**: str + | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, member, basic, large, library, gdg + + + space_primary + If the destination *dest* data set does not exist , this sets the primary space allocated for the data set. + + The unit of space used is set using *space_type*. + + | **required**: False + | **type**: int + + + space_secondary + If the destination *dest* data set does not exist , this sets the secondary space allocated for the data set. + + The unit of space used is set using *space_type*. + + | **required**: False + | **type**: int + + + space_type + If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space. + + Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``. + + | **required**: False + | **type**: str + | **choices**: k, m, g, cyl, trk + + + record_format + If the destination data set does not exist, this sets the format of the data set. (e.g ``fb``) + + Choices are case-sensitive. + + | **required**: False + | **type**: str + | **choices**: fb, vb, fba, vba, u + + + record_length + The length of each record in the data set, in bytes. + + For variable data sets, the length must include the 4-byte prefix area. + + Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. + + | **required**: False + | **type**: int + + + block_size + The block size to use for the data set. + + | **required**: False + | **type**: int + + + directory_blocks + The number of directory blocks to allocate to the data set. + + | **required**: False + | **type**: int + + + key_offset + The key offset to use when creating a KSDS data set. + + *key_offset* is required when *type=ksds*. + + *key_offset* should only be provided when *type=ksds* + + | **required**: False + | **type**: int + + + key_length + The key length to use when creating a KSDS data set. + + *key_length* is required when *type=ksds*. + + *key_length* should only be provided when *type=ksds* + + | **required**: False + | **type**: int + + + sms_storage_class + The storage class for an SMS-managed dataset. + + Required for SMS-managed datasets that do not match an SMS-rule. + + Not valid for datasets that are not SMS-managed. + + Note that all non-linear VSAM datasets are SMS-managed. + + | **required**: False + | **type**: str + + + sms_data_class + The data class for an SMS-managed dataset. + + Optional for SMS-managed datasets that do not match an SMS-rule. + + Not valid for datasets that are not SMS-managed. + + Note that all non-linear VSAM datasets are SMS-managed. + + | **required**: False + | **type**: str + + + sms_management_class + The management class for an SMS-managed dataset. + + Optional for SMS-managed datasets that do not match an SMS-rule. + + Not valid for datasets that are not SMS-managed. + + Note that all non-linear VSAM datasets are SMS-managed. + + | **required**: False + | **type**: str + + + limit + Sets the *limit* attribute for a GDG. + + Specifies the maximum number, from 1 to 255(up to 999 if extended), of generations that can be associated with the GDG being defined. + + *limit* is required when *type=gdg*. + + | **required**: False + | **type**: int + + + empty + Sets the *empty* attribute for a GDG. + + If false, removes only the oldest GDS entry when a new GDS is created that causes GDG limit to be exceeded. 
+ + If true, removes all GDS entries from a GDG base when a new GDS is created that causes the GDG limit to be exceeded. + + | **required**: False + | **type**: bool + + + scratch + Sets the *scratch* attribute for a GDG. + + Specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing. + + | **required**: False + | **type**: bool + + + purge + Sets the *purge* attribute for a GDG. + + Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. + + | **required**: False + | **type**: bool + + + extended + Sets the *extended* attribute for a GDG. + + If false, allow up to 255 generation data sets (GDSs) to be associated with the GDG. + + If true, allow up to 999 generation data sets (GDS) to be associated with the GDG. + + | **required**: False + | **type**: bool + + + fifo + Sets the *fifo* attribute for a GDG. + + If false, the order is the newest GDS defined to the oldest GDS. This is the default value. + + If true, the order is the oldest GDS defined to the newest GDS. + + | **required**: False + | **type**: bool + + + +use_template + Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + + Only valid when ``src`` is a local file or directory. + + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ + + | **required**: False + | **type**: bool + | **default**: False + + +template_parameters + Options to set the way Jinja2 will process templates. + + Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + + These options are ignored unless ``use_template`` is true. + + | **required**: False + | **type**: dict + + + variable_start_string + Marker for the beginning of a statement to print a variable in Jinja2. + + | **required**: False + | **type**: str + | **default**: {{ + + + variable_end_string + Marker for the end of a statement to print a variable in Jinja2. + + | **required**: False + | **type**: str + | **default**: }} + + + block_start_string + Marker for the beginning of a block in Jinja2. + + | **required**: False + | **type**: str + | **default**: {% + + + block_end_string + Marker for the end of a block in Jinja2. + + | **required**: False + | **type**: str + | **default**: %} + + + comment_start_string + Marker for the beginning of a comment in Jinja2. + + | **required**: False + | **type**: str + | **default**: {# + + + comment_end_string + Marker for the end of a comment in Jinja2. + + | **required**: False + | **type**: str + | **default**: #} + + + line_statement_prefix + Prefix used by Jinja2 to identify line-based statements. + + | **required**: False + | **type**: str + + + line_comment_prefix + Prefix used by Jinja2 to identify comment lines. + + | **required**: False + | **type**: str + + + lstrip_blocks + Whether Jinja2 should strip leading spaces from the start of a line to a block. 
+ + | **required**: False + | **type**: bool + | **default**: False + + + trim_blocks + Whether Jinja2 should remove the first newline after a block is removed. + + Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + + | **required**: False + | **type**: bool + | **default**: True + + + keep_trailing_newline + Whether Jinja2 should keep the first trailing newline at the end of a template after rendering. + + | **required**: False + | **type**: bool + | **default**: False + + + newline_sequence + Sequence that starts a newline in a template. + + | **required**: False + | **type**: str + | **default**: \\n + | **choices**: \\n, \\r, \\r\\n + + + auto_reload + Whether to reload a template file when it has changed after the task has started. + + | **required**: False + | **type**: bool + | **default**: False + + + autoescape + Whether to enable autoescape of XML/HTML elements on a template. + + | **required**: False + | **type**: bool + | **default**: True + + + + + +Attributes +---------- +action + | **support**: full + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Copy a local file to a sequential data set + zos_copy: + src: /path/to/sample_seq_data_set + dest: SAMPLE.SEQ.DATA.SET + + - name: Copy a local file to a USS location and validate checksum + zos_copy: + src: /path/to/test.log + dest: /tmp/test.log + validate: true + + - name: Copy a local ASCII encoded file and convert to IBM-1047 + zos_copy: + src: /path/to/file.txt + dest: /tmp/file.txt + + - name: Copy a local directory to a PDSE + zos_copy: + src: /path/to/local/dir/ + dest: HLQ.DEST.PDSE + + - name: Copy file with permission details + zos_copy: + src: /path/to/foo.conf + dest: /etc/foo.conf + mode: "0644" + group: foo + owner: bar + + - name: Module will follow the symbolic link specified in src + zos_copy: + src: /path/to/link + dest: /path/to/uss/location + local_follow: true + + - name: Copy a local file to a PDS member and convert encoding + zos_copy: + src: /path/to/local/file + dest: HLQ.SAMPLE.PDSE(MEMBER) + encoding: + from: UTF-8 + to: IBM-037 + + - name: Copy a VSAM (KSDS) to a VSAM (KSDS) + zos_copy: + src: SAMPLE.SRC.VSAM + dest: SAMPLE.DEST.VSAM + remote_src: true + + - name: Copy inline content to a sequential dataset and replace existing data + zos_copy: + content: 'Inline content to be copied' + dest: SAMPLE.SEQ.DATA.SET + + - name: Copy a USS file to sequential data set and convert encoding beforehand + zos_copy: + src: /path/to/remote/uss/file + dest: SAMPLE.SEQ.DATA.SET + remote_src: true + + - name: Copy a USS directory to another USS directory + zos_copy: + src: /path/to/uss/dir + dest: /path/to/dest/dir + remote_src: true + + - name: Copy a local binary file to a PDSE member + zos_copy: + src: /path/to/binary/file + dest: HLQ.SAMPLE.PDSE(MEMBER) + is_binary: true + + - name: Copy a sequential data set to a PDS member + zos_copy: + src: SAMPLE.SEQ.DATA.SET + dest: HLQ.SAMPLE.PDSE(MEMBER) + remote_src: true + + - name: Copy a 
local file and take a backup of the existing file + zos_copy: + src: /path/to/local/file + dest: /path/to/dest + backup: true + backup_name: /tmp/local_file_backup + + - name: Copy a PDS on remote system to a new PDS + zos_copy: + src: HLQ.SRC.PDS + dest: HLQ.NEW.PDS + remote_src: true + + - name: Copy a PDS on remote system to a PDS, replacing the original + zos_copy: + src: HLQ.SAMPLE.PDSE + dest: HLQ.EXISTING.PDSE + remote_src: true + force: true + + - name: Copy PDS member to a new PDS member. Replace if it already exists + zos_copy: + src: HLQ.SAMPLE.PDSE(SRCMEM) + dest: HLQ.NEW.PDSE(DESTMEM) + remote_src: true + force: true + + - name: Copy a USS file to a PDSE member. If PDSE does not exist, allocate it + zos_copy: + src: /path/to/uss/src + dest: DEST.PDSE.DATA.SET(MEMBER) + remote_src: true + + - name: Copy a sequential data set to a USS file + zos_copy: + src: SRC.SEQ.DATA.SET + dest: /tmp/ + remote_src: true + + - name: Copy a PDSE member to USS file + zos_copy: + src: SRC.PDSE(MEMBER) + dest: /tmp/member + remote_src: true + + - name: Copy a PDS to a USS directory (/tmp/SRC.PDS) + zos_copy: + src: SRC.PDS + dest: /tmp + remote_src: true + + - name: Copy all members inside a PDS to another PDS + zos_copy: + src: SOME.SRC.PDS(*) + dest: SOME.DEST.PDS + remote_src: true + + - name: Copy all members starting with 'ABC' inside a PDS to another PDS + zos_copy: + src: SOME.SRC.PDS(ABC*) + dest: SOME.DEST.PDS + remote_src: true + + - name: Allocate destination in a specific volume + zos_copy: + src: SOME.SRC.PDS + dest: SOME.DEST.PDS + volume: 'VOL033' + remote_src: true + + - name: Copy a USS file to a fully customized sequential data set + zos_copy: + src: /path/to/uss/src + dest: SOME.SEQ.DEST + remote_src: true + volume: '222222' + dest_data_set: + type: seq + space_primary: 10 + space_secondary: 3 + space_type: k + record_format: vb + record_length: 150 + + - name: Copy a Program Object and its aliases on a remote system to a new PDSE member MYCOBOL + zos_copy: + src: HLQ.COBOLSRC.PDSE(TESTPGM) + dest: HLQ.NEW.PDSE(MYCOBOL) + remote_src: true + executable: true + aliases: true + + - name: Copy a Load Library from a USS directory /home/loadlib to a new PDSE + zos_copy: + src: '/home/loadlib/' + dest: HLQ.LOADLIB.NEW + remote_src: true + executable: true + aliases: true + + - name: Copy a file with ASA characters to a new sequential data set. + zos_copy: + src: ./files/print.txt + dest: HLQ.PRINT.NEW + asa_text: true + + - name: Copy a file to a new generation data set. + zos_copy: + src: /path/to/uss/src + dest: HLQ.TEST.GDG(+1) + remote_src: true + + - name: Copy a local file and take a backup of the existing file with a GDS. + zos_copy: + src: /path/to/local/file + dest: /path/to/dest + backup: true + backup_name: HLQ.BACKUP.GDG(+1) + + + + +Notes +----- + +.. note:: + Destination data sets are assumed to be in catalog. When trying to copy to an uncataloged data set, the module assumes that the data set does not exist and will create it. + + Destination will be backed up if either ``backup`` is ``true`` or ``backup_name`` is provided. If ``backup`` is ``false`` but ``backup_name`` is provided, task will fail. + + When copying local files or directories, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file or directory being copied. Temporary files will always be deleted, regardless of success or failure of the copy task. + + VSAM data sets can only be copied to other VSAM data sets. 
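+
+   As a quick illustration of the ``use_template`` and ``template_parameters`` options documented above, a task like the following sketch (the template path, data set name and marker strings are placeholder values, not defaults) renders a local Jinja2 template with custom variable markers before copying it:
+
+   .. code-block:: yaml+jinja
+
+      - name: Render a local Jinja2 template and copy it to a sequential data set
+        zos_copy:
+          src: /path/to/template.j2
+          dest: SAMPLE.SEQ.DATA.SET
+          use_template: true
+          template_parameters:
+            variable_start_string: "((("
+            variable_end_string: ")))"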
+ + For supported character sets used to encode data, refer to the `documentation `_. + + This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + + Beginning in version 1.8.x, zos_copy will no longer attempt to correct a copy of a data type member into a PDSE that contains program objects. You can control this behavior using module option ``executable`` that will signify an executable is being copied into a PDSE with other executables. Mixing data type members with program objects will result in a (FSUM8976,./zos_copy.html) error. + + It is the playbook author or user's responsibility to ensure they have appropriate authority to the RACF FACILITY resource class. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + If trying to copy a migrated data set, first recall it before executing this module. This module does not perform recalls automatically. See modules `zos_mvs_raw <./zos_mvs_raw.html>`_ and `zos_tso_cmd <./zos_tso_cmd.html>`_ for examples of how to recall migrated data sets using this collection. + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_fetch_module` + - :ref:`zos_data_set_module` + - :ref:`zos_mvs_raw_module` + - :ref:`zos_tso_cmd_module` + + + + +Return Values +------------- + + +src + Source file or data set being copied. + + | **returned**: changed + | **type**: str + | **sample**: /path/to/source.log + +dest + Destination file/path or data set name. + + | **returned**: success + | **type**: str + | **sample**: SAMPLE.SEQ.DATA.SET + +dest_created + Indicates whether the module created the destination. + + | **returned**: success and if dest was created by the module. + | **type**: bool + | **sample**: + + .. code-block:: json + + true + +destination_attributes + Attributes of a dest created by the module. + + | **returned**: success and destination was created by the module. + | **type**: dict + | **sample**: + + .. code-block:: json + + { + "block_size": 32760, + "record_format": "fb", + "record_length": 45, + "space_primary": 2, + "space_secondary": 1, + "space_type": "k", + "type": "pdse" + } + + block_size + Block size of the dataset. + + | **type**: int + | **sample**: 32760 + + record_format + Record format of the dataset. + + | **type**: str + | **sample**: fb + + record_length + Record length of the dataset. + + | **type**: int + | **sample**: 45 + + space_primary + Allocated primary space for the dataset. + + | **type**: int + | **sample**: 2 + + space_secondary + Allocated secondary space for the dataset. + + | **type**: int + | **sample**: 1 + + space_type + Unit of measurement for space. + + | **type**: str + | **sample**: k + + type + Type of dataset allocated. + + | **type**: str + | **sample**: pdse + + +checksum + SHA256 checksum of the file after running zos_copy. + + | **returned**: When ``validate=true`` and if ``dest`` is USS + | **type**: str + | **sample**: 8d320d5f68b048fc97559d771ede68b37a71e8374d1d678d96dcfa2b2da7a64e + +backup_name + Name of the backup file or data set that was created. 
+ + | **returned**: if backup=true or backup_name=true + | **type**: str + | **sample**: /path/to/file.txt.2015-02-03@04:15~ + +gid + Group id of the file, after execution. + + | **returned**: success and if dest is USS + | **type**: int + | **sample**: 100 + +group + Group of the file, after execution. + + | **returned**: success and if dest is USS + | **type**: str + | **sample**: httpd + +owner + Owner of the file, after execution. + + | **returned**: success and if dest is USS + | **type**: str + | **sample**: httpd + +uid + Owner id of the file, after execution. + + | **returned**: success and if dest is USS + | **type**: int + | **sample**: 100 + +mode + Permissions of the target, after execution. + + | **returned**: success and if dest is USS + | **type**: str + | **sample**: 420 + +size + Size(in bytes) of the target, after execution. + + | **returned**: success and dest is USS + | **type**: int + | **sample**: 1220 + +state + State of the target, after execution. + + | **returned**: success and if dest is USS + | **type**: str + | **sample**: file + +note + A note to the user after module terminates. + + | **returned**: When ``force=true`` and ``dest`` exists + | **type**: str + | **sample**: No data was copied + +msg + Failure message returned by the module. + + | **returned**: failure + | **type**: str + | **sample**: Error while gathering data set information + +stdout + The stdout from a USS command or MVS command, if applicable. + + | **returned**: failure + | **type**: str + | **sample**: Copying local file /tmp/foo/src to remote path /tmp/foo/dest + +stderr + The stderr of a USS command or MVS command, if applicable. + + | **returned**: failure + | **type**: str + | **sample**: No such file or directory "/tmp/foo" + +stdout_lines + List of strings containing individual lines from stdout. + + | **returned**: failure + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "u\"Copying local file /tmp/foo/src to remote path /tmp/foo/dest..\"" + ] + +stderr_lines + List of strings containing individual lines from stderr. + + | **returned**: failure + | **type**: list + | **sample**: + + .. code-block:: json + + [ + { + "u\"FileNotFoundError": "No such file or directory \u0027/tmp/foo\u0027\"" + } + ] + +rc + The return code of a USS or MVS command, if applicable. + + | **returned**: failure + | **type**: int + | **sample**: 8 + +cmd + The MVS command issued, if applicable. + + | **returned**: failure + | **type**: str + | **sample**: REPRO INDATASET(SAMPLE.DATA.SET) OUTDATASET(SAMPLE.DEST.DATA.SET) + diff --git a/docs/source/modules/zos_data_set.rst b/docs/source/modules/zos_data_set.rst new file mode 100644 index 0000000000..9578ad66a0 --- /dev/null +++ b/docs/source/modules/zos_data_set.rst @@ -0,0 +1,886 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_data_set.py + +.. _zos_data_set_module: + + +zos_data_set -- Manage data sets +================================ + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Create, delete and set attributes of data sets. +- When forcing data set replacement, contents will not be preserved. + + + + + +Parameters +---------- + + +name + The name of the data set being managed. (e.g ``USER.TEST``) + + If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module-runners username. + + Required if *type=member* or *state!=present* and not using *batch*. 
+
+  | **required**: False
+  | **type**: str
+
+
+state
+  The final state desired for the specified data set.
+
+  If *state=absent* and the data set does not exist on the managed node, no action is taken and the module completes successfully with *changed=False*.
+
+  If *state=absent* and the data set does exist on the managed node, the data set is removed and the module completes successfully with *changed=True*.
+
+  If *state=absent* and *type=member* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted.
+
+  If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, then the data set is removed. The module completes successfully with *changed=True*.
+
+  If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, then no action is taken. The module completes successfully with *changed=False*.
+
+  If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set to be deleted is cataloged. The module will catalog the original data set on completion; if the attempts to catalog fail, no action is taken. The module completes successfully with *changed=False*.
+
+  If *state=absent* and *type=gdg* and the GDG base has active generations, the module will complete successfully with *changed=False*. To remove it, option *force* needs to be used. If the GDG base does not have active generations, the module will complete successfully with *changed=True*.
+
+  If *state=present* and the data set does not exist on the managed node, the data set is created and cataloged, and the module completes successfully with *changed=True*.
+
+  If *state=present* and *replace=True* and the data set is present on the managed node, the existing data set is deleted, and a new data set is created and cataloged with the desired attributes. The module completes successfully with *changed=True*.
+
+  If *state=present* and *replace=False* and the data set is present on the managed node, no action is taken and the module completes successfully with *changed=False*.
+
+  If *state=present* and *type=member* and the member does not exist in the data set, a member formatted to store data is created and the module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there are executables (program objects) and data. The member created is formatted to store data, not an executable.
+
+  If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action is taken and the module completes successfully with *changed=False*.
+
+  If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, the module completes successfully with *changed=True*.
+
+  If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, the module returns failure with *changed=False*.
+
+  If *state=uncataloged* and the data set is not found, no action is taken and the module completes successfully with *changed=False*.
+
+  If *state=uncataloged* and the data set is found, the data set is uncataloged and the module completes successfully with *changed=True*.
+
+  If *state=present*, the data set is already cataloged and *volumes* is provided, the module will compare the volumes where it is cataloged against the provided *volumes*. If they don't match, the module will fail with an error indicating the data set is cataloged on a different volume. To resolve this, you must first uncatalog the data set before creating it on the new volume.
+
+  If *state=present*, the data set is already cataloged, *volumes* is provided, and the volumes match exactly, no action is taken and the module completes successfully with *changed=False*.
+
+  | **required**: False
+  | **type**: str
+  | **default**: present
+  | **choices**: present, absent, cataloged, uncataloged
+
+
+type
+  The data set type to be used when creating a data set. (e.g. ``pdse``).
+
+  ``member`` expects to be used with an existing partitioned data set.
+
+  Choices are case-sensitive.
+
+  | **required**: False
+  | **type**: str
+  | **default**: pds
+  | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs, gdg
+
+
+space_primary
+  The amount of primary space to allocate for the data set.
+
+  The unit of space used is set using *space_type*.
+
+  | **required**: False
+  | **type**: int
+  | **default**: 5
+
+
+space_secondary
+  The amount of secondary space to allocate for the data set.
+
+  The unit of space used is set using *space_type*.
+
+  | **required**: False
+  | **type**: int
+  | **default**: 3
+
+
+space_type
+  The unit of measurement to use when defining primary and secondary space.
+
+  Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``.
+
+  | **required**: False
+  | **type**: str
+  | **default**: m
+  | **choices**: k, m, g, cyl, trk
+
+
+record_format
+  The format of the data set. (e.g. ``FB``)
+
+  Choices are case-sensitive.
+
+  When *type=ksds*, *type=esds*, *type=rrds*, *type=lds* or *type=zfs* then *record_format=None*; these types do not have a default *record_format*.
+
+  | **required**: False
+  | **type**: str
+  | **default**: fb
+  | **choices**: fb, vb, fba, vba, u, f
+
+
+sms_storage_class
+  The storage class for an SMS-managed data set.
+
+  Required for SMS-managed data sets that do not match an SMS rule.
+
+  Not valid for data sets that are not SMS-managed.
+
+  Note that all non-linear VSAM data sets are SMS-managed.
+
+  | **required**: False
+  | **type**: str
+
+
+sms_data_class
+  The data class for an SMS-managed data set.
+
+  Optional for SMS-managed data sets that do not match an SMS rule.
+
+  Not valid for data sets that are not SMS-managed.
+
+  Note that all non-linear VSAM data sets are SMS-managed.
+
+  | **required**: False
+  | **type**: str
+
+
+sms_management_class
+  The management class for an SMS-managed data set.
+
+  Optional for SMS-managed data sets that do not match an SMS rule.
+
+  Not valid for data sets that are not SMS-managed.
+
+  Note that all non-linear VSAM data sets are SMS-managed.
+
+  | **required**: False
+  | **type**: str
+
+
+record_length
+  The length, in bytes, of each record in the data set.
+
+  For variable data sets, the length must include the 4-byte prefix area.
+
+  Defaults vary depending on format: 80 for FB/FBA, 137 for VB/VBA and 0 for U.
+
+  | **required**: False
+  | **type**: int
+
+
+block_size
+  The block size to use for the data set.
+
+  | **required**: False
+  | **type**: int
+
+
+directory_blocks
+  The number of directory blocks to allocate to the data set.
+
+  | **required**: False
+  | **type**: int
+
+
+key_offset
+  The key offset to use when creating a KSDS data set.
+
+  *key_offset* is required when *type=ksds*.
+
+  *key_offset* should only be provided when *type=ksds*.
+
+  | **required**: False
+  | **type**: int
+
+
+key_length
+  The key length to use when creating a KSDS data set.
+
+  *key_length* is required when *type=ksds*.
+
+  *key_length* should only be provided when *type=ksds*.
+
+  | **required**: False
+  | **type**: int
+
+
+empty
+  Sets the *empty* attribute for Generation Data Groups.
+
+  If false, removes only the oldest GDS entry when a new GDS is created that causes the GDG limit to be exceeded.
+
+  If true, removes all GDS entries from a GDG base when a new GDS is created that causes the GDG limit to be exceeded.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+extended
+  Sets the *extended* attribute for Generation Data Groups.
+
+  If false, allow up to 255 generation data sets (GDSs) to be associated with the GDG.
+
+  If true, allow up to 999 generation data sets (GDSs) to be associated with the GDG.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+fifo
+  Sets the *fifo* attribute for Generation Data Groups.
+
+  If false, the order is the newest GDS defined to the oldest GDS. This is the default value.
+
+  If true, the order is the oldest GDS defined to the newest GDS.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+limit
+  Sets the *limit* attribute for Generation Data Groups.
+
+  Specifies the maximum number, from 1 to 255 (up to 999 if extended), of GDSs that can be associated with the GDG being defined.
+
+  *limit* is required when *type=gdg*.
+
+  | **required**: False
+  | **type**: int
+
+
+purge
+  Sets the *purge* attribute for Generation Data Groups.
+
+  Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+scratch
+  When ``state=absent``, specifies whether to physically remove the data set from the volume.
+
+  If ``scratch=true``, the data set is deleted and its entry is removed from the volume's VTOC.
+
+  If ``scratch=false``, the data set is uncataloged but not physically removed from the volume. This is the equivalent of using ``NOSCRATCH`` in an ``IDCAMS DELETE`` command.
+
+  When ``state=present``, option **scratch** sets the *scratch* attribute for Generation Data Groups and is ignored for any other data set type.
+
+  When ``state=present`` and ``type=gdg``, it specifies what action is to be taken for a generation data set located on disk volumes when the data set is uncataloged from the GDG base as a result of EMPTY/NOEMPTY processing.
+
+  | **required**: False
+  | **type**: bool
+
+
+volumes
+  If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located.
+
+  If creating a data set, *volumes* specifies the volume(s) where the data set should be created.
+
+  If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged.
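+
+  As a sketch of the behavior just described (the data set name and volume serial below are placeholder values), a task like the following would catalog the data set if it is found on volume ``222222`` but missing from the catalog, or create it there if it does not exist:
+
+  .. code-block:: yaml+jinja
+
+     - name: Ensure a data set exists, cataloging it from volume 222222 if needed
+       zos_data_set:
+         name: someds.name.here
+         state: present
+         volumes: "222222"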
+
+  If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system.
+
+  *volumes* is required when *state=cataloged*.
+
+  Accepts a string when using a single volume and a list of strings when using multiple.
+
+  | **required**: False
+  | **type**: raw
+
+
+replace
+  When *replace=True*, and *state=present*, an existing data set matching *name* will be replaced.
+
+  Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails.
+
+  If *replace=True*, all data in the original data set will be lost.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+tmp_hlq
+  Override the default high level qualifier (HLQ) for temporary and backup data sets.
+
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.
+
+  | **required**: False
+  | **type**: str
+
+
+force
+  Specifies that the data set can be shared with others during a member delete operation, which means the data set you are updating can be simultaneously updated by others.
+
+  This is helpful when a data set is being used in a long running process such as a started task and you want to delete a member.
+
+  The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*.
+
+  The *force=True* option only applies to data set members when *state=absent* and *type=member* and when removing a GDG base with active generations.
+
+  If *force=True*, *type=gdg* and *state=absent*, it will force remove a GDG base with active generations.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+batch
+  Batch can be used to perform operations on multiple data sets in a single module call.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: dict
+
+
+  name
+    The name of the data set being managed. (e.g. ``USER.TEST``)
+
+    If *name* is not provided, a randomized data set name will be generated with the HLQ matching the module runner's username.
+
+    Required if *type=member* or *state!=present*.
+
+    | **required**: False
+    | **type**: str
+
+
+  state
+    The final state desired for the specified data set.
+
+    If *state=absent* and the data set does not exist on the managed node, no action is taken and the module completes successfully with *changed=False*.
+
+    If *state=absent* and the data set does exist on the managed node, the data set is removed and the module completes successfully with *changed=True*.
+
+    If *state=absent* and *type=member* and *force=True*, the data set will be opened with *DISP=SHR* such that the entire data set can be accessed by other processes while the specified member is deleted.
+
+    If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, then the data set is removed. The module completes successfully with *changed=True*.
+
+    If *state=absent* and *volumes* is provided, and the data set is not found in the catalog, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, then no action is taken. The module completes successfully with *changed=False*.
+
+    If *state=absent* and *volumes* is provided, and the data set is found in the catalog, the module compares the catalog volume attributes to the provided *volumes*. If the volume attributes are different, the cataloged data set will be uncataloged temporarily while the requested data set to be deleted is cataloged. The module will catalog the original data set on completion; if the attempts to catalog fail, no action is taken. The module completes successfully with *changed=False*.
+
+    If *state=present* and the data set does not exist on the managed node, the data set is created and cataloged, and the module completes successfully with *changed=True*.
+
+    If *state=present* and *replace=True* and the data set is present on the managed node, the existing data set is deleted, and a new data set is created and cataloged with the desired attributes. The module completes successfully with *changed=True*.
+
+    If *state=present* and *replace=False* and the data set is present on the managed node, no action is taken and the module completes successfully with *changed=False*.
+
+    If *state=present* and *type=member* and the member does not exist in the data set, a member formatted to store data is created and the module completes successfully with *changed=True*. Note, a PDSE does not allow a mixture of formats such that there are executables (program objects) and data. The member created is formatted to store data, not an executable.
+
+    If *state=cataloged* and *volumes* is provided and the data set is already cataloged, no action is taken and the module completes successfully with *changed=False*.
+
+    If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set is successful, the module completes successfully with *changed=True*.
+
+    If *state=cataloged* and *volumes* is provided and the data set is not cataloged, the module attempts to catalog the data set using the supplied *name* and *volumes*. If the attempt to catalog the data set fails, the module returns failure with *changed=False*.
+
+    If *state=uncataloged* and the data set is not found, no action is taken and the module completes successfully with *changed=False*.
+
+    If *state=uncataloged* and the data set is found, the data set is uncataloged and the module completes successfully with *changed=True*.
+
+    | **required**: False
+    | **type**: str
+    | **default**: present
+    | **choices**: present, absent, cataloged, uncataloged
+
+
+  type
+    The data set type to be used when creating a data set. (e.g. ``pdse``)
+
+    ``member`` expects to be used with an existing partitioned data set.
+
+    Choices are case-sensitive.
+
+    | **required**: False
+    | **type**: str
+    | **default**: pds
+    | **choices**: ksds, esds, rrds, lds, seq, pds, pdse, library, basic, large, member, hfs, zfs, gdg
+
+
+  space_primary
+    The amount of primary space to allocate for the data set.
+
+    The unit of space used is set using *space_type*.
+
+    | **required**: False
+    | **type**: int
+    | **default**: 5
+
+
+  space_secondary
+    The amount of secondary space to allocate for the data set.
+
+    The unit of space used is set using *space_type*.
+
+    | **required**: False
+    | **type**: int
+    | **default**: 3
+
+
+  space_type
+    The unit of measurement to use when defining primary and secondary space.
+
+    Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``.
+ + | **required**: False + | **type**: str + | **default**: m + | **choices**: k, m, g, cyl, trk + + + record_format + The format of the data set. (e.g ``FB``) + + Choices are case-sensitive. + + When *type=ksds*, *type=esds*, *type=rrds*, *type=lds* or *type=zfs* then *record_format=None*, these types do not have a default *record_format*. + + | **required**: False + | **type**: str + | **default**: fb + | **choices**: fb, vb, fba, vba, u, f + + + sms_storage_class + The storage class for an SMS-managed dataset. + + Required for SMS-managed datasets that do not match an SMS-rule. + + Not valid for datasets that are not SMS-managed. + + Note that all non-linear VSAM datasets are SMS-managed. + + | **required**: False + | **type**: str + + + sms_data_class + The data class for an SMS-managed dataset. + + Optional for SMS-managed datasets that do not match an SMS-rule. + + Not valid for datasets that are not SMS-managed. + + Note that all non-linear VSAM datasets are SMS-managed. + + | **required**: False + | **type**: str + + + sms_management_class + The management class for an SMS-managed dataset. + + Optional for SMS-managed datasets that do not match an SMS-rule. + + Not valid for datasets that are not SMS-managed. + + Note that all non-linear VSAM datasets are SMS-managed. + + | **required**: False + | **type**: str + + + record_length + The length, in bytes, of each record in the data set. + + For variable data sets, the length must include the 4-byte prefix area. + + Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. + + | **required**: False + | **type**: int + + + block_size + The block size to use for the data set. + + | **required**: False + | **type**: int + + + directory_blocks + The number of directory blocks to allocate to the data set. + + | **required**: False + | **type**: int + + + key_offset + The key offset to use when creating a KSDS data set. + + *key_offset* is required when *type=ksds*. + + *key_offset* should only be provided when *type=ksds* + + | **required**: False + | **type**: int + + + key_length + The key length to use when creating a KSDS data set. + + *key_length* is required when *type=ksds*. + + *key_length* should only be provided when *type=ksds* + + | **required**: False + | **type**: int + + + empty + Sets the *empty* attribute for Generation Data Groups. + + If false, removes only the oldest GDS entry when a new GDS is created that causes GDG limit to be exceeded. + + If true, removes all GDS entries from a GDG base when a new GDS is created that causes the GDG limit to be exceeded. + + | **required**: False + | **type**: bool + | **default**: False + + + extended + Sets the *extended* attribute for Generation Data Groups. + + If false, allow up to 255 generation data sets (GDSs) to be associated with the GDG. + + If true, allow up to 999 generation data sets (GDS) to be associated with the GDG. + + | **required**: False + | **type**: bool + | **default**: False + + + fifo + Sets the *fifo* attribute for Generation Data Groups. + + If false, the order is the newest GDS defined to the oldest GDS. This is the default value. + + If true, the order is the oldest GDS defined to the newest GDS. + + | **required**: False + | **type**: bool + | **default**: False + + + limit + Sets the *limit* attribute for Generation Data Groups. + + Specifies the maximum number, from 1 to 255(up to 999 if extended), of GDS that can be associated with the GDG being defined. + + *limit* is required when *type=gdg*. 
+ + | **required**: False + | **type**: int + + + purge + Sets the *purge* attribute for Generation Data Groups. + + Specifies whether to override expiration dates when a generation data set (GDS) is rolled off and the ``scratch`` option is set. + + | **required**: False + | **type**: bool + | **default**: False + + + scratch + When ``state=absent``, specifies whether to physically remove the data set from the volume. + + If ``scratch=true``, the data set is deleted and its entry is removed from the volume's VTOC. + + If ``scratch=false``, the data set is uncataloged but not physically removed from the volume. This is the equivalent of using ``NOSCRATCH`` in an ``IDCAMS DELETE`` command. + + The default is ``true`` for non-GDG data sets and ``false`` for GDG data sets. + + | **required**: False + | **type**: bool + + + volumes + If cataloging a data set, *volumes* specifies the name of the volume(s) where the data set is located. + + + If creating a data set, *volumes* specifies the volume(s) where the data set should be created. + + + If *volumes* is provided when *state=present*, and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged. + + + If *volumes* is provided when *state=absent* and the data set is not found in the catalog, `zos_data_set <./zos_data_set.html>`_ will check the volume table of contents to see if the data set exists. If the data set does exist, it will be cataloged and promptly removed from the system. + + + *volumes* is required when *state=cataloged*. + + Accepts a string when using a single volume and a list of strings when using multiple. + + | **required**: False + | **type**: raw + + + replace + When *replace=True*, and *state=present*, existing data set matching *name* will be replaced. + + Replacement is performed by deleting the existing data set and creating a new data set with the same name and desired attributes. Since the existing data set will be deleted prior to creating the new data set, no data set will exist if creation of the new data set fails. + + + If *replace=True*, all data in the original data set will be lost. + + | **required**: False + | **type**: bool + | **default**: False + + + force + Specifies that the data set can be shared with others during a member delete operation which results in the data set you are updating to be simultaneously updated by others. + + This is helpful when a data set is being used in a long running process such as a started task and you are wanting to delete a member. + + The *force=True* option enables sharing of data sets through the disposition *DISP=SHR*. + + The *force=True* only applies to data set members when *state=absent* and *type=member*. + + | **required**: False + | **type**: bool + | **default**: False + + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + - name: Create a sequential data set if it does not exist + zos_data_set: + name: someds.name.here + type: seq + state: present + + - name: Create a PDS data set if it does not exist + zos_data_set: + name: someds.name.here + type: pds + space_primary: 5 + space_type: m + record_format: fba + record_length: 25 + + - name: Attempt to replace a data set if it exists + zos_data_set: + name: someds.name.here + type: pds + space_primary: 5 + space_type: m + record_format: u + record_length: 25 + replace: true + + - name: Attempt to replace a data set if it exists. If not found in the catalog, check if it is available on volume 222222, and catalog if found. + zos_data_set: + name: someds.name.here + type: pds + space_primary: 5 + space_type: m + record_format: u + record_length: 25 + volumes: "222222" + replace: true + + - name: Create an ESDS data set if it does not exist + zos_data_set: + name: someds.name.here + type: esds + + - name: Create a KSDS data set if it does not exist + zos_data_set: + name: someds.name.here + type: ksds + key_length: 8 + key_offset: 0 + + - name: Create an RRDS data set with storage class MYDATA if it does not exist + zos_data_set: + name: someds.name.here + type: rrds + sms_storage_class: mydata + + - name: Delete a data set if it exists + zos_data_set: + name: someds.name.here + state: absent + + - name: Uncatalog a data set but do not remove it from the volume. + zos_data_set: + name: someds.name.here + type: seq + state: absent + scratch: false + + - name: Delete a data set if it exists. If data set not cataloged, check on volume 222222 for the data set, and then catalog and delete if found. + zos_data_set: + name: someds.name.here + state: absent + volumes: "222222" + + - name: Write a member to an existing PDS; replace if member exists + zos_data_set: + name: someds.name.here(mydata) + type: member + replace: true + + - name: Write a member to an existing PDS; do not replace if member exists + zos_data_set: + name: someds.name.here(mydata) + type: member + + - name: Remove a member from an existing PDS + zos_data_set: + name: someds.name.here(mydata) + state: absent + type: member + + - name: Remove a member from an existing PDS/E by opening with disposition DISP=SHR + zos_data_set: + name: someds.name.here(mydata) + state: absent + type: member + force: true + + - name: Create multiple partitioned data sets and add one or more members to each + zos_data_set: + batch: + - name: someds.name.here1 + type: pds + space_primary: 5 + space_type: m + record_format: fb + replace: true + - name: someds.name.here1(member1) + type: member + - name: someds.name.here2(member1) + type: member + replace: true + - name: someds.name.here2(member2) + type: member + + - name: Catalog a data set present on volume 222222 if it is uncataloged. + zos_data_set: + name: someds.name.here + state: cataloged + volumes: "222222" + + - name: Uncatalog a data set if it is cataloged. + zos_data_set: + name: someds.name.here + state: uncataloged + + - name: Create a data set on volumes 000000 and 222222 if it does not exist. + zos_data_set: + name: someds.name.here + state: present + volumes: + - "000000" + - "222222" + + + + + + + + + + +Return Values +------------- + + +names + The data set names, including temporary generated data set names, in the order provided to the module. 
+ + | **returned**: always + | **type**: list + | **elements**: str + diff --git a/docs/source/modules/zos_encode.rst b/docs/source/modules/zos_encode.rst new file mode 100644 index 0000000000..4a5e61f798 --- /dev/null +++ b/docs/source/modules/zos_encode.rst @@ -0,0 +1,335 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_encode.py + +.. _zos_encode_module: + + +zos_encode -- Perform encoding operations. +========================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Converts the encoding of characters that are read from a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, or KSDS (VSAM data set). +- Writes the data to a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, or KSDS (VSAM data set). + + + + + +Parameters +---------- + + +encoding + Specifies which encodings the destination file or data set should be converted from and to. + + Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. + + | **required**: False + | **type**: dict + + + from + The character set of the source *src*. + + | **required**: False + | **type**: str + | **default**: IBM-1047 + + + to + The destination *dest* character set for the output to be written as. + + | **required**: False + | **type**: str + | **default**: ISO8859-1 + + + +src + The location can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, a generation data set (GDS) or KSDS (VSAM data set). + + The USS path or file must be an absolute pathname. + + If *src* is a USS directory, all files will be encoded. + + Encoding a whole generation data group (GDG) is not supported. + + | **required**: True + | **type**: str + + +dest + The location where the converted characters are output. + + The destination *dest* can be a UNIX System Services (USS) file or path, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, a generation data set (GDS) or KSDS (VSAM data set). + + If the length of the PDSE member name used in *dest* is greater than 8 characters, the member name will be truncated when written out. + + If *dest* is not specified, the *src* will be used as the destination and will overwrite the *src* with the character set in the option *to_encoding*. + + The USS file or path must be an absolute pathname. + + If *dest* is a data set, it must be already allocated. + + | **required**: False + | **type**: str + + +backup + Creates a backup file or backup data set for *dest*, including the timestamp information to ensure that you retrieve the original file. + + *backup_name* can be used to specify a backup file name if *backup=true*. + + | **required**: False + | **type**: bool + | **default**: False + + +backup_name + Specify the USS file name or data set name for the dest backup. + + If dest is a USS file or path, *backup_name* must be a file or path name, and the USS path or file must be an absolute pathname. + + If dest is an MVS data set, the *backup_name* must be an MVS data set name. + + If *backup_name* is not provided, the default backup name will be used. The default backup name for a USS file or path will be the destination file or path name appended with a timestamp, e.g. /path/file_name.2020-04-23-08-32-29-bak.tar. If dest is an MVS data set, the default backup name will be a random name generated by IBM Z Open Automation Utilities. 
+ + ``backup_name`` will be returned on either success or failure of module execution such that data can be retrieved. + + If *backup_name* is a generation data set (GDS), it must be a relative positive name (for example, V(HLQ.USER.GDG(+1\))). + + | **required**: False + | **type**: str + + +backup_compress + Determines if backups to USS files or paths should be compressed. + + *backup_compress* is only used when *backup=true*. + + | **required**: False + | **type**: bool + | **default**: False + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Convert file encoding from IBM-1047 to ISO8859-1 for the same file + zos_encode: + src: /zos_encode/test.data + + - name: Convert file encoding from IBM-1047 to ISO8859-1 to another file with + backup + zos_encode: + src: /zos_encode/test.data + dest: /zos_encode_out/test.out + encoding: + from: IBM-1047 + to: ISO8859-1 + backup: true + backup_compress: true + + - name: Convert file encoding from IBM-1047 to ISO8859-1 to a directory + zos_encode: + src: /zos_encode/test.data + dest: /zos_encode_out/ + + - name: Convert file encoding from all files in a directory to another + directory + zos_encode: + src: /zos_encode/ + dest: /zos_encode_out/ + encoding: + from: ISO8859-1 + to: IBM-1047 + + - name: Convert file encoding from a USS file to a sequential data set + zos_encode: + src: /zos_encode/test.data + dest: USER.TEST.PS + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from files in a directory to a partitioned + data set + zos_encode: + src: /zos_encode/ + dest: USER.TEST.PDS + encoding: + from: ISO8859-1 + to: IBM-1047 + + - name: Convert file encoding from a USS file to a partitioned data set + member + zos_encode: + src: /zos_encode/test.data + dest: USER.TEST.PDS(TESTDATA) + encoding: + from: ISO8859-1 + to: IBM-1047 + + - name: Convert file encoding from a sequential data set to a USS file + zos_encode: + src: USER.TEST.PS + dest: /zos_encode/test.data + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from a PDS encoding to a USS directory + zos_encode: + src: USER.TEST.PDS + dest: /zos_encode/ + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from a sequential data set to another + sequential data set + zos_encode: + src: USER.TEST.PS + dest: USER.TEST1.PS + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from a sequential data set to a + partitioned data set (extended) member + zos_encode: + src: USER.TEST.PS + dest: USER.TEST1.PDS(TESTDATA) + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from a USS file to a VSAM data set + zos_encode: + src: /zos_encode/test.data + dest: USER.TEST.VS + encoding: + from: ISO8859-1 + to: IBM-1047 + + - 
name: Convert file encoding from a VSAM data set to a USS file + zos_encode: + src: USER.TEST.VS + dest: /zos_encode/test.data + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from a VSAM data set to a sequential + data set + zos_encode: + src: USER.TEST.VS + dest: USER.TEST.PS + encoding: + from: IBM-1047 + to: ISO8859-1 + + - name: Convert file encoding from a sequential data set a VSAM data set + zos_encode: + src: USER.TEST.PS + dest: USER.TEST.VS + encoding: + from: ISO8859-1 + to: IBM-1047 + + - name: Convert file encoding from a USS file to a generation data set + zos_encode: + src: /zos_encode/test.data + dest: USER.TEST.GDG(0) + encoding: + from: ISO8859-1 + to: IBM-1047 + + - name: Convert file encoding from a USS file to a data set while using a GDG for backup + zos_encode: + src: /zos_encode/test.data + dest: USER.TEST.PS + encoding: + from: ISO8859-1 + to: IBM-1047 + backup: true + backup_name: USER.BACKUP.GDG(+1) + + + + +Notes +----- + +.. note:: + It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. + + For supported character sets used to encode data, refer to the `documentation `_. + + + + + + + +Return Values +------------- + + +src + The location of the input characters identified in option *src*. + + | **returned**: always + | **type**: str + +dest + The name of the output file or data set. If dest is a USS file or path and the status has been changed in the conversion, the file status will also be returned. + + | **returned**: always + | **type**: str + +backup_name + Name of the backup file created. + + | **returned**: changed and if backup=yes + | **type**: str + | **sample**: /path/file_name.2020-04-23-08-32-29-bak.tar + diff --git a/docs/source/modules/zos_fetch.rst b/docs/source/modules/zos_fetch.rst new file mode 100644 index 0000000000..8a341dfcdc --- /dev/null +++ b/docs/source/modules/zos_fetch.rst @@ -0,0 +1,359 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_fetch.py + +.. _zos_fetch_module: + + +zos_fetch -- Fetch data from z/OS +================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- This module fetches a UNIX System Services (USS) file, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE, generation data set (GDS), generation data group (GDG), or KSDS (VSAM data set) from a remote z/OS system. +- When fetching a sequential data set, the destination file name will be the same as the data set name. +- When fetching a PDS or PDSE, the destination will be a directory with the same name as the PDS or PDSE. +- When fetching a PDS/PDSE member, destination will be a file. +- Files that already exist at ``dest`` will be overwritten if they are different than ``src``. +- When fetching a GDS, the relative name will be resolved to its absolute one. +- When fetching a generation data group, the destination will be a directory with the same name as the GDG. + + + + + +Parameters +---------- + + +src + Name of a UNIX System Services (USS) file, PS (sequential data set), PDS, PDSE, member of a PDS, PDSE, GDS, GDG or KSDS (VSAM data set). 
+ + USS file paths should be absolute paths. + + | **required**: True + | **type**: str + + +dest + Local path where the file or data set will be stored. + + If dest is an existing file or directory, the contents will be overwritten. + + | **required**: True + | **type**: path + + +fail_on_missing + When set to true, the task will fail if the source file is missing. + + | **required**: False + | **type**: bool + | **default**: true + + +validate_checksum + Verify that the source and destination checksums match after the files are fetched. + + | **required**: False + | **type**: bool + | **default**: true + + +flat + If set to "true", override the default behavior of appending hostname/path/to/file to the destination, instead the file or data set will be fetched to the destination directory without appending remote hostname to the destination. + + | **required**: False + | **type**: bool + | **default**: false + + +is_binary + Specifies if the file being fetched is a binary. + + | **required**: False + | **type**: bool + | **default**: false + + +use_qualifier + Indicates whether the data set high level qualifier should be used when fetching. + + | **required**: False + | **type**: bool + | **default**: false + + +encoding + Specifies which encodings the fetched data set should be converted from and to. If this parameter is not provided, encoding conversions will not take place. + + | **required**: False + | **type**: dict + + + from + The character set of the source *src*. + + Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. + + | **required**: True + | **type**: str + + + to + The destination *dest* character set for the output to be written as. + + Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. + + | **required**: True + | **type**: str + + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + +ignore_sftp_stderr + During data transfer through SFTP, the SFTP command directs content to stderr. By default, the module essentially ignores the stderr stream produced by SFTP and continues execution. The user is able to override this behavior by setting this parameter to ``false``. By doing so, any content written to stderr is considered an error by Ansible and will cause the module to fail. + + When Ansible verbosity is set to greater than 3, either through the command line interface (CLI) using **-vvvv** or through environment variables such as **verbosity = 4**, then this parameter will automatically be set to ``true``. + + | **required**: False + | **type**: bool + | **default**: True + + + + +Attributes +---------- +action + | **support**: full + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: none + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + - name: Fetch file from USS and store in /tmp/fetched/hostname/tmp/somefile + zos_fetch: + src: /tmp/somefile + dest: /tmp/fetched + + - name: Fetch a sequential data set and store in /tmp/SOME.DATA.SET + zos_fetch: + src: SOME.DATA.SET + dest: /tmp/ + flat: true + + - name: Fetch a PDS as binary and store in /tmp/SOME.PDS.DATASET + zos_fetch: + src: SOME.PDS.DATASET + dest: /tmp/ + flat: true + is_binary: true + + - name: Fetch a UNIX file and don't validate its checksum + zos_fetch: + src: /tmp/somefile + dest: /tmp/ + flat: true + validate_checksum: false + + - name: Fetch a VSAM data set + zos_fetch: + src: USER.TEST.VSAM + dest: /tmp/ + flat: true + + - name: Fetch a PDS member named 'DATA' + zos_fetch: + src: USER.TEST.PDS(DATA) + dest: /tmp/ + flat: true + + - name: Fetch a USS file and convert from IBM-037 to ISO8859-1 + zos_fetch: + src: /etc/profile + dest: /tmp/ + encoding: + from: IBM-037 + to: ISO8859-1 + flat: true + + - name: Fetch the current generation data set from a GDG + zos_fetch: + src: USERHLQ.DATA.SET(0) + dest: /tmp/ + flat: true + + - name: Fetch a previous generation data set from a GDG + zos_fetch: + src: USERHLQ.DATA.SET(-3) + dest: /tmp/ + flat: true + + - name: Fetch a generation data group + zos_fetch: + src: USERHLQ.TEST.GDG + dest: /tmp/ + flat: true + + + + +Notes +----- + +.. note:: + When fetching PDSE and VSAM data sets, temporary storage will be used on the remote z/OS system. After the PDSE or VSAM data set is successfully transferred, the temporary storage will be deleted. The size of the temporary storage will correspond to the size of PDSE or VSAM data set being fetched. If module execution fails, the temporary storage will be deleted. + + To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the ``checksum`` parameter. + + All data sets are always assumed to be cataloged. If an uncataloged data set needs to be fetched, it should be cataloged first. + + Fetching HFS or ZFS type data sets is currently not supported. + + For supported character sets used to encode data, refer to the `documentation `_. + + This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_data_set_module` + - :ref:`zos_copy_module` + + + + +Return Values +------------- + + +file + The source file path or data set on the remote machine. + + | **returned**: success + | **type**: str + | **sample**: SOME.DATA.SET + +dest + The destination file path on the controlling machine. + + | **returned**: success + | **type**: str + | **sample**: /tmp/SOME.DATA.SET + +is_binary + Indicates the transfer mode that was used to fetch. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + +checksum + The SHA256 checksum of the fetched file or data set. checksum validation is performed for all USS files and sequential data sets. 
+
+
+
+
+Notes
+-----
+
+.. note::
+   When fetching PDSE and VSAM data sets, temporary storage will be used on the remote z/OS system. After the PDSE or VSAM data set is successfully transferred, the temporary storage will be deleted. The size of the temporary storage will correspond to the size of PDSE or VSAM data set being fetched. If module execution fails, the temporary storage will be deleted.
+
+   To ensure optimal performance, data integrity checks for PDS, PDSE, and members of PDS or PDSE are done through the transfer methods used. As a result, the module response will not include the ``checksum`` parameter.
+
+   All data sets are always assumed to be cataloged. If an uncataloged data set needs to be fetched, it should be cataloged first.
+
+   Fetching HFS or ZFS type data sets is currently not supported.
+
+   For supported character sets used to encode data, refer to the `documentation `_.
+
+   This module uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z, thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers; if SFTP is not available, the module will fail.
+
+
+
+See Also
+--------
+
+.. seealso::
+
+   - :ref:`zos_data_set_module`
+   - :ref:`zos_copy_module`
+
+
+
+
+Return Values
+-------------
+
+
+file
+  The source file path or data set on the remote machine.
+
+  | **returned**: success
+  | **type**: str
+  | **sample**: SOME.DATA.SET
+
+dest
+  The destination file path on the controlling machine.
+
+  | **returned**: success
+  | **type**: str
+  | **sample**: /tmp/SOME.DATA.SET
+
+is_binary
+  Indicates the transfer mode that was used to fetch.
+
+  | **returned**: success
+  | **type**: bool
+  | **sample**:
+
+    .. code-block:: json
+
+       true
+
+checksum
+  The SHA256 checksum of the fetched file or data set. Checksum validation is performed for all USS files and sequential data sets.
+
+  | **returned**: success and src is a non-partitioned data set
+  | **type**: str
+  | **sample**: 8d320d5f68b048fc97559d771ede68b37a71e8374d1d678d96dcfa2b2da7a64e
+
+data_set_type
+  Indicates the fetched data set type.
+
+  | **returned**: success
+  | **type**: str
+  | **sample**: PDSE
+
+note
+  Notice of module failure when ``fail_on_missing`` is false.
+
+  | **returned**: failure and fail_on_missing=false
+  | **type**: str
+  | **sample**: The data set USER.PROCLIB does not exist. No data was fetched.
+
+msg
+  Message returned on failure.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: The source 'TEST.DATA.SET' does not exist or is uncataloged.
+
+stdout
+  The stdout from a USS command or MVS command, if applicable.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: DATA SET 'USER.PROCLIB' NOT IN CATALOG
+
+stderr
+  The stderr of a USS command or MVS command, if applicable.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: File /tmp/result.log not found.
+
+stdout_lines
+  List of strings containing individual lines from stdout.
+
+  | **returned**: failure
+  | **type**: list
+  | **sample**:
+
+    .. code-block:: json
+
+       [
+           "u\u0027USER.TEST.PDS NOT IN CATALOG..\u0027"
+       ]
+
+stderr_lines
+  List of strings containing individual lines from stderr.
+
+  | **returned**: failure
+  | **type**: list
+  | **sample**:
+
+    .. code-block:: json
+
+       [
+           "u\u0027Unable to traverse PDS USER.TEST.PDS not found\u0027"
+       ]
+
+rc
+  The return code of a USS command or MVS command, if applicable.
+
+  | **returned**: failure
+  | **type**: int
+  | **sample**: 8
+
diff --git a/docs/source/modules/zos_find.rst b/docs/source/modules/zos_find.rst
new file mode 100644
index 0000000000..1c3d5222c1
--- /dev/null
+++ b/docs/source/modules/zos_find.rst
@@ -0,0 +1,398 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_find.py
+
+.. _zos_find_module:
+
+
+zos_find -- Find matching data sets
+===================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Return a list of data sets based on specific criteria.
+- Multiple criteria can be added (AND'd) together.
+- The ``zos_find`` module can only find MVS data sets. Use the `find `_ module to find USS files.
+
+
+
+
+
+Parameters
+----------
+
+
+age
+  Select data sets whose age is equal to or greater than the specified time.
+
+  Use a negative age to find data sets equal to or less than the specified time.
+
+  You can choose days, weeks, months or years by specifying the first letter of any of those words (e.g., "1w"). The default is days.
+
+  Age is determined by using the 'referenced date' of the data set.
+
+  | **required**: False
+  | **type**: str
+
+
+age_stamp
+  Choose the age property against which to compare age.
+
+  ``creation_date`` is the date the data set was created and ``ref_date`` is the date the data set was last referenced.
+
+  ``ref_date`` is only applicable to sequential and partitioned data sets.
+
+  | **required**: False
+  | **type**: str
+  | **default**: creation_date
+  | **choices**: creation_date, ref_date
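+
+An illustrative sketch (not one of the module's shipped examples) of how *age*
+and *age_stamp* combine; the ``USER.*`` pattern is hypothetical:
+
+.. code-block:: yaml+jinja
+
+   - name: Find data sets last referenced four or more weeks ago
+     zos_find:
+       patterns:
+         - USER.*
+       age: 4w
+       age_stamp: ref_date
+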
+
+
+contains
+  A string which should be matched against the data set content or data set member content.
+
+  | **required**: False
+  | **type**: str
+
+
+excludes
+  Data sets whose names match an excludes pattern are culled from pattern matches. Multiple patterns can be specified using a list.
+
+  The pattern can be a regular expression.
+
+  If the pattern is a regular expression, it must match the full data set name.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+
+
+patterns
+  One or more data set or member patterns.
+
+  The patterns restrict the list of data sets or members to be returned to those names that match at least one of the patterns specified. Multiple patterns can be specified using a list.
+
+  This parameter expects a list, which can be either comma separated or YAML.
+
+  If ``pds_patterns`` is provided, ``patterns`` must be member patterns.
+
+  When searching for members within a PDS/PDSE, pattern can be a regular expression.
+
+  | **required**: True
+  | **type**: list
+  | **elements**: str
+
+
+size
+  Select data sets whose size is equal to or greater than the specified size.
+
+  Use a negative size to find files equal to or less than the specified size.
+
+  Unqualified values are in bytes but b, k, m, g, and t can be appended to specify bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
+
+  Filtering by size is currently only valid for sequential and partitioned data sets.
+
+  | **required**: False
+  | **type**: str
+
+
+pds_patterns
+  List of PDS/PDSE to search. Wildcard is possible.
+
+  Required when searching for data set members.
+
+  Valid only for ``nonvsam`` resource types. Otherwise ignored.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+
+
+resource_type
+  The types of resources to search.
+
+  ``nonvsam`` refers to one of SEQ, LIBRARY (PDSE), PDS, LARGE, BASIC, EXTREQ, or EXTPREF.
+
+  ``cluster`` refers to a VSAM cluster. The ``data`` and ``index`` are the data and index components of a VSAM cluster.
+
+  ``gdg`` refers to Generation Data Groups. The module searches based on the GDG base name.
+
+  ``migrated`` refers to listing migrated data sets. Only ``excludes`` and ``migrated_type`` options can be used along with this option. The module only searches based on data set patterns.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+  | **default**: nonvsam
+  | **choices**: nonvsam, cluster, data, index, gdg, migrated
+
+
+migrated_type
+  A migrated data set related attribute, only valid when ``resource_type=migrated``.
+
+  If provided, will search for only those types of migrated data sets.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+  | **default**: ['cluster', 'data', 'index', 'nonvsam']
+  | **choices**: nonvsam, cluster, data, index
+
+
+volume
+  If provided, only the data sets allocated in the specified list of volumes will be searched.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+
+
+empty
+  A GDG attribute, only valid when ``resource_type=gdg``.
+
+  If provided, will search for data sets with *empty* attribute set as provided.
+
+  | **required**: False
+  | **type**: bool
+
+
+extended
+  A GDG attribute, only valid when ``resource_type=gdg``.
+
+  If provided, will search for data sets with *extended* attribute set as provided.
+
+  | **required**: False
+  | **type**: bool
+
+
+fifo
+  A GDG attribute, only valid when ``resource_type=gdg``.
+
+  If provided, will search for data sets with *fifo* attribute set as provided.
+
+  | **required**: False
+  | **type**: bool
+
+
+limit
+  A GDG attribute, only valid when ``resource_type=gdg``.
+
+  If provided, will search for data sets with *limit* attribute set as provided.
+
+  | **required**: False
+  | **type**: int
+
+
+purge
+  A GDG attribute, only valid when ``resource_type=gdg``.
+
+  If provided, will search for data sets with *purge* attribute set as provided.
+
+  | **required**: False
+  | **type**: bool
+
+
+scratch
+  A GDG attribute, only valid when ``resource_type=gdg``.
+
+  If provided, will search for data sets with *scratch* attribute set as provided.
+
+  | **required**: False
+  | **type**: bool
+
+
+
+
+Attributes
+----------
+action
+  | **support**: none
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: none
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Find all data sets with HLQ 'IMS.LIB' or 'IMSTEST.LIB' that contain the word 'hello'
+     zos_find:
+       patterns:
+         - IMS.LIB.*
+         - IMSTEST.LIB.*
+       contains: 'hello'
+       age: 2d
+
+   - name: Search for 'rexx' in all data sets matching IBM.TSO.*.C??
+     zos_find:
+       patterns:
+         - IBM.TSO.*.C??
+       contains: 'rexx'
+
+   - name: Exclude data sets that have a low level qualifier 'TEST'
+     zos_find:
+       patterns: 'IMS.LIB.*'
+       contains: 'hello'
+       excludes: '.*TEST'
+
+   - name: Find all members starting with characters 'TE' in a given list of PDS patterns
+     zos_find:
+       patterns: '^te.*'
+       pds_patterns:
+         - IMSTEST.TEST.*
+         - IMSTEST.USER.*
+         - USER.*.LIB
+
+   - name: Find all data sets greater than 2MB and allocated in one of the specified volumes
+     zos_find:
+       patterns: 'USER.*'
+       size: 2m
+       volume:
+         - SCR03
+         - IMSSUN
+
+   - name: Find all VSAM clusters starting with the word 'USER'
+     zos_find:
+       patterns:
+         - USER.*
+       resource_type:
+         - 'cluster'
+
+   - name: Find all Generation Data Groups starting with the word 'USER' and specific GDG attributes.
+     zos_find:
+       patterns:
+         - USER.*
+       resource_type:
+         - 'gdg'
+       limit: 30
+       scratch: true
+       purge: true
+
+   - name: Find all migrated and nonvsam data sets starting with the word 'USER'
+     zos_find:
+       patterns:
+         - USER.*
+       resource_type:
+         - 'migrated'
+       migrated_type:
+         - 'nonvsam'
+
+
+
+
+Notes
+-----
+
+.. note::
+   Only cataloged data sets will be searched. If an uncataloged data set needs to be searched, it should be cataloged first. The `zos_data_set <./zos_data_set.html>`_ module can be used to catalog uncataloged data sets.
+
+   The `zos_find <./zos_find.html>`_ module currently does not support wildcards for high level qualifiers. For example, ``SOME.*.DATA.SET`` is a valid pattern, but ``*.DATA.SET`` is not.
+
+   If a data set pattern is specified as ``USER.*``, the matching data sets will have two name segments such as ``USER.ABC``, ``USER.XYZ`` etc. If a wildcard is specified as ``USER.*.ABC``, the matching data sets will have three name segments such as ``USER.XYZ.ABC``, ``USER.TEST.ABC`` etc.
+
+   The time taken to execute the module is proportional to the number of data sets present on the system and how large the data sets are.
+
+   When searching for content within data sets, only non-binary content is considered.
+
+   As a migrated data set's information can't be retrieved without recalling it first, other options besides ``excludes`` and ``migrated_type`` are not supported.
+
+
+
+See Also
+--------
+
+.. seealso::
+
+   - :ref:`zos_data_set_module`
+
+
+
+
+Return Values
+-------------
+
+
+data_sets
+  All matches found with the specified criteria.
+
+  | **returned**: success
+  | **type**: list
+  | **sample**:
+
+    .. code-block:: json
+
+       [
+         {
+           "members": {
+             "COBU": null,
+             "MC2CNAM": null,
+             "TINAD": null
+           },
+           "name": "IMS.CICS13.USERLIB",
+           "type": "NONVSAM"
+         },
+         {
+           "name": "SAMPLE.DATA.SET",
+           "type": "CLUSTER"
+         },
+         {
+           "name": "SAMPLE.VSAM.DATA",
+           "type": "DATA"
+         }
+       ]
+
+matched
+  The number of matched data sets found.
+
+  | **returned**: success
+  | **type**: int
+  | **sample**: 49
+
+examined
+  The number of data sets searched.
+
+  | **returned**: success
+  | **type**: int
+  | **sample**: 158
+
+msg
+  Failure message returned by the module.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: Error while gathering data set information
+
+stdout
+  The stdout from a USS command or MVS command, if applicable.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: Searching dataset IMSTESTL.COMNUC
+
+stderr
+  The stderr of a USS command or MVS command, if applicable.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: No such file or directory "/tmp/foo"
+
+rc
+  The return code of a USS or MVS command, if applicable.
+
+  | **returned**: failure
+  | **type**: int
+  | **sample**: 8
+
diff --git a/docs/source/modules/zos_gather_facts.rst b/docs/source/modules/zos_gather_facts.rst
new file mode 100644
index 0000000000..1821906a4f
--- /dev/null
+++ b/docs/source/modules/zos_gather_facts.rst
@@ -0,0 +1,138 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_gather_facts.py
+
+.. _zos_gather_facts_module:
+
+
+zos_gather_facts -- Gather z/OS system facts.
+=============================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Retrieve variables from target z/OS systems.
+- Variables are added to the *ansible_facts* dictionary, available to playbooks.
+- Apply filters on the *gather_subset* list to reduce the variables that are added to the *ansible_facts* dictionary.
+- Note, the module will fail fast if any unsupported options are provided. This is done to raise awareness of a failure in an automation setting.
+
+
+
+
+
+Parameters
+----------
+
+
+gather_subset
+  If specified, it will collect facts that come under the specified subset (e.g., ``ipl`` will return ipl facts). Specifying subsets is recommended to reduce time in gathering facts when the facts needed are in a specific subset.
+
+  The following subsets are available ``ipl``, ``cpu``, ``sys``, and ``iodf``. Depending on the version of ZOAU, additional subsets may be available.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+  | **default**: ['all']
+
+
+filter
+  Filter out facts from the *ansible_facts* dictionary.
+
+  Uses shell-style `fnmatch `_ pattern matching to filter out the collected facts.
+
+  An empty list means 'no filter', same as providing '*'.
+
+  Filtering is performed after the facts are gathered, so it does not reduce the time spent collecting facts; it only reduces the number of variables that are added to the *ansible_facts* dictionary. To restrict the facts that are collected, refer to the *gather_subset* parameter.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+  | **default**: []
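+
+An illustrative sketch (not one of the module's shipped examples) of combining
+*gather_subset* and *filter*; the ``*name*`` pattern is hypothetical:
+
+.. code-block:: yaml+jinja
+
+   - name: Gather only 'sys' facts and keep variables whose names contain 'name'
+     ibm.ibm_zos_core.zos_gather_facts:
+       gather_subset:
+         - sys
+       filter:
+         - "*name*"
+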
+
+
+
+Attributes
+----------
+action
+  | **support**: none
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: full
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Return all available z/OS facts.
+     ibm.ibm_zos_core.zos_gather_facts:
+
+   - name: Return z/OS facts in the systems subset ('sys').
+     ibm.ibm_zos_core.zos_gather_facts:
+       gather_subset: sys
+
+   - name: Return z/OS facts in the subsets ('ipl' and 'sys') and filter out all
+       facts that do not match 'parmlib'.
+     ibm.ibm_zos_core.zos_gather_facts:
+       gather_subset:
+         - ipl
+         - sys
+       filter:
+         - "*parmlib*"
+
+
+
+
+
+
+
+
+
+
+Return Values
+-------------
+
+
+ansible_facts
+  Collection of facts that are gathered from the z/OS systems.
+
+  | **returned**: when collected
+  | **type**: dict
+  | **sample**:
+
+    .. code-block:: json
+
+       [
+         {
+           "ansible_facts": {
+             "arch_level": "2",
+             "hw_name": "SYSZD6",
+             "ipl_volume": "RES820",
+             "lpar_name": "SVLLAB01",
+             "primary_jes": "JES2",
+             "product_mod_level": "00",
+             "product_name": "z/OS",
+             "product_owner": "IBM CORP",
+             "product_release": "05",
+             "product_version": "02",
+             "smf_name": "3090",
+             "sys_name": "EC33018A",
+             "sysplex_name": "SVPLEX1",
+             "vm_name": "EC33018A"
+           }
+         }
+       ]
+
diff --git a/docs/source/modules/zos_job_output.rst b/docs/source/modules/zos_job_output.rst
new file mode 100644
index 0000000000..6b992d02dc
--- /dev/null
+++ b/docs/source/modules/zos_job_output.rst
@@ -0,0 +1,523 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_job_output.py
+
+.. _zos_job_output_module:
+
+
+zos_job_output -- Display job output
+====================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Display the z/OS job output for given criteria (job id, job name, or owner) with or without a data definition name as a filter.
+- Provide at least one of job id, job name, or owner.
+- The job id can be specific such as "STC02560", or one that uses a pattern such as "STC*" or "*".
+- The job name can be specific such as "TCPIP", or one that uses a pattern such as "TCP*" or "*".
+- The owner can be specific such as "IBMUSER", or one that uses a pattern like "*".
+- If there is no ddname, or if ddname="?", output of all the ddnames under the given job will be displayed.
+- If SYSIN DDs are needed, *sysin_dd* should be set to ``true``.
+
+
+
+
+
+Parameters
+----------
+
+
+job_id
+  The z/OS job ID of the job containing the spool file. (e.g. "STC02560", "STC*")
+
+  | **required**: False
+  | **type**: str
+
+
+job_name
+  The name of the batch job. (e.g. "TCPIP", "C*")
+
+  | **required**: False
+  | **type**: str
+
+
+owner
+  The owner who ran the job. (e.g. "IBMUSER", "*")
+
+  | **required**: False
+  | **type**: str
+
+
+ddname
+  Data definition name (show only this DD on a found job). (e.g. "JESJCL", "?")
+
+  | **required**: False
+  | **type**: str
+
+
+sysin_dd
+  Whether to include SYSIN DDs as part of the output.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+
+
+Attributes
+----------
+action
+  | **support**: none
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: full
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. 
If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Job output with ddname + zos_job_output: + job_id: "STC02560" + ddname: "JESMSGLG" + + - name: JES Job output without ddname + zos_job_output: + job_id: "STC02560" + + - name: JES Job output with all ddnames + zos_job_output: + job_id: "STC*" + job_name: "*" + owner: "IBMUSER" + ddname: "?" + + - name: Query a job's output including SYSIN DDs + zos_job_output: + job_id: "JOB00548" + sysin_dd: true + + + + + + + + + + +Return Values +------------- + + +jobs + The output information for a list of jobs matching specified criteria. If no job status is found, this will return ret_code dictionary with parameter msg_txt = The job could not be found. + + | **returned**: success + | **type**: list + | **elements**: dict + | **sample**: + + .. code-block:: json + + [ + { + "class": "R", + "content_type": "JOB", + "cpu_time": 1414, + "ddnames": [ + { + "byte_count": "775", + "content": [ + "1 J E S 2 J O B L O G -- S Y S T E M S T L 1 -- N O D E S T L 1 ", + "0 ", + " 10.25.48 JOB00134 ---- TUESDAY, 18 FEB 2020 ----", + " 10.25.48 JOB00134 IRR010I USERID OMVSADM IS ASSIGNED TO THIS JOB.", + " 10.25.48 JOB00134 $HASP375 JES2 ESTIMATED LINES EXCEEDED", + " 10.25.48 JOB00134 ICH70001I OMVSADM LAST ACCESS AT 10:25:47 ON TUESDAY, FEBRUARY 18, 2020", + " 10.25.48 JOB00134 $HASP375 HELLO ESTIMATED LINES EXCEEDED", + " 10.25.48 JOB00134 $HASP373 HELLO STARTED - INIT 3 - CLASS R - SYS STL1", + " 10.25.48 JOB00134 SMF000I HELLO STEP0001 IEBGENER 0000", + " 10.25.48 JOB00134 $HASP395 HELLO ENDED - RC=0000", + "0------ JES2 JOB STATISTICS ------", + "- 18 FEB 2020 JOB EXECUTION DATE", + "- 16 CARDS READ", + "- 59 SYSOUT PRINT RECORDS", + "- 0 SYSOUT PUNCH RECORDS", + "- 6 SYSOUT SPOOL KBYTES", + "- 0.00 MINUTES EXECUTION TIME" + ], + "ddname": "JESMSGLG", + "id": "2", + "procstep": "", + "record_count": "17", + "stepname": "JES2" + }, + { + "byte_count": "574", + "content": [ + " 1 //HELLO JOB (T043JM,JM00,1,0,0,0),\u0027HELLO WORLD - JRM\u0027,CLASS=R, JOB00134", + " // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM ", + " //* ", + " //* PRINT \"HELLO WORLD\" ON JOB OUTPUT ", + " //* ", + " //* NOTE THAT THE EXCLAMATION POINT IS INVALID EBCDIC FOR JCL ", + " //* AND WILL CAUSE A JCL ERROR ", + " //* ", + " 2 //STEP0001 EXEC PGM=IEBGENER ", + " 3 //SYSIN DD DUMMY ", + " 4 //SYSPRINT DD SYSOUT=* ", + " 5 //SYSUT1 DD * ", + " 6 //SYSUT2 DD SYSOUT=* ", + " 7 // " + ], + "ddname": "JESJCL", + "id": "3", + "procstep": "", + "record_count": "14", + "stepname": "JES2" + }, + { + "byte_count": "1066", + "content": [ + " ICH70001I OMVSADM LAST ACCESS AT 10:25:47 ON TUESDAY, FEBRUARY 18, 2020", + " IEF236I ALLOC. FOR HELLO STEP0001", + " IEF237I DMY ALLOCATED TO SYSIN", + " IEF237I JES2 ALLOCATED TO SYSPRINT", + " IEF237I JES2 ALLOCATED TO SYSUT1", + " IEF237I JES2 ALLOCATED TO SYSUT2", + " IEF142I HELLO STEP0001 - STEP WAS EXECUTED - COND CODE 0000", + " IEF285I OMVSADM.HELLO.JOB00134.D0000102.? SYSOUT ", + " IEF285I OMVSADM.HELLO.JOB00134.D0000101.? SYSIN ", + " IEF285I OMVSADM.HELLO.JOB00134.D0000103.? 
SYSOUT ", + " IEF373I STEP/STEP0001/START 2020049.1025", + " IEF032I STEP/STEP0001/STOP 2020049.1025 ", + " CPU: 0 HR 00 MIN 00.00 SEC SRB: 0 HR 00 MIN 00.00 SEC ", + " VIRT: 60K SYS: 240K EXT: 0K SYS: 11548K", + " ATB- REAL: 8K SLOTS: 0K", + " VIRT- ALLOC: 10M SHRD: 0M", + " IEF375I JOB/HELLO /START 2020049.1025", + " IEF033I JOB/HELLO /STOP 2020049.1025 ", + " CPU: 0 HR 00 MIN 00.00 SEC SRB: 0 HR 00 MIN 00.00 SEC " + ], + "ddname": "JESYSMSG", + "id": "4", + "procstep": "", + "record_count": "19", + "stepname": "JES2" + }, + { + "byte_count": "251", + "content": [ + "1DATA SET UTILITY - GENERATE PAGE 0001 ", + "-IEB352I WARNING: ONE OR MORE OF THE OUTPUT DCB PARMS COPIED FROM INPUT ", + " ", + " PROCESSING ENDED AT EOD " + ], + "ddname": "SYSPRINT", + "id": "102", + "procstep": "", + "record_count": "4", + "stepname": "STEP0001" + }, + { + "byte_count": "49", + "content": [ + " HELLO, WORLD " + ], + "ddname": "SYSUT2", + "id": "103", + "procstep": "", + "record_count": "1", + "stepname": "STEP0001" + } + ], + "duration": 0, + "execution_node": "STL1", + "execution_time": "00:00:03", + "job_class": "R", + "job_id": "JOB00134", + "job_name": "HELLO", + "origin_node": "STL1", + "owner": "OMVSADM", + "priority": "1", + "program_name": "IEBGENER", + "queue_position": "58", + "ret_code": { + "code": 0, + "msg": "CC 0000", + "msg_code": "0000", + "msg_txt": "", + "steps": [ + { + "step_cc": 0, + "step_name": "STEP0001" + } + ] + }, + "subsystem": "STL1", + "system": "STL1" + } + ] + + job_id + The z/OS job ID of the job containing the spool file. + + | **type**: str + | **sample**: JOB00134 + + job_name + The name of the batch job. + + | **type**: str + | **sample**: HELLO + + system + The job entry system that MVS uses to do work. + + | **type**: str + | **sample**: STL1 + + subsystem + The job entry subsystem that MVS uses to do work. + + | **type**: str + | **sample**: STL1 + + cpu_time + Sum of the CPU time used by each job step, in microseconds. + + | **type**: int + | **sample**: 5 + + execution_node + Execution node that picked the job and executed it. + + | **type**: str + | **sample**: STL1 + + origin_node + Origin node that submitted the job. + + | **type**: str + | **sample**: STL1 + + class + Identifies the data set used in a system output data set, usually called a sysout data set. + + | **type**: str + + content_type + Type of address space used by the job, can be one of the following types. - APPC for an APPC Initiator. - JGRP for a JOBGROUP. - JOB for a Batch job. - STC for a Started task. - TSU for a Time sharing user. - \? for an unknown or pending job. + + | **type**: str + | **sample**: JOB + + creation_date + Date, local to the target system, when the job was created. + + | **type**: str + | **sample**: 2023-05-04 + + creation_time + Time, local to the target system, when the job was created. + + | **type**: str + | **sample**: 14:15:00 + + execution_time + Total duration time of the job execution, if it has finished. If the job is still running, it represents the time elapsed from the job execution start and current time. + + | **type**: str + | **sample**: 00:00:10 + + ddnames + Data definition names. + + | **type**: list + | **elements**: dict + + ddname + Data definition name. + + | **type**: str + | **sample**: JESMSGLG + + record_count + Count of the number of lines in a print data set. + + | **type**: int + | **sample**: 17 + + id + The file ID. 
+
+      | **type**: str
+      | **sample**: 2
+
+    stepname
+      A step name is a name that identifies the job step so that other JCL statements or the operating system can refer to it.
+
+      | **type**: str
+      | **sample**: JES2
+
+    procstep
+      Identifies the set of statements inside JCL grouped together to perform a particular function.
+
+      | **type**: str
+      | **sample**: PROC1
+
+    byte_count
+      Byte size in a print data set.
+
+      | **type**: int
+      | **sample**: 574
+
+    content
+      The ddname content.
+
+      | **type**: list
+      | **elements**: str
+      | **sample**:
+
+        .. code-block:: json
+
+           [
+               " 1 //HELLO JOB (T043JM,JM00,1,0,0,0),\u0027HELLO WORLD - JRM\u0027,CLASS=R, JOB00134",
+               " // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM ",
+               " //* ",
+               " //* PRINT \"HELLO WORLD\" ON JOB OUTPUT ",
+               " //* ",
+               " //* NOTE THAT THE EXCLAMATION POINT IS INVALID EBCDIC FOR JCL ",
+               " //* AND WILL CAUSE A JCL ERROR ",
+               " //* ",
+               " 2 //STEP0001 EXEC PGM=IEBGENER ",
+               " 3 //SYSIN DD DUMMY ",
+               " 4 //SYSPRINT DD SYSOUT=* ",
+               " 5 //SYSUT1 DD * ",
+               " 6 //SYSUT2 DD SYSOUT=* ",
+               " 7 // "
+           ]
+
+
+  job_class
+    Job class for this job.
+
+    | **type**: str
+    | **sample**: A
+
+  svc_class
+    Service class for this job.
+
+    | **type**: str
+    | **sample**: C
+
+  priority
+    A numeric indicator of the job priority assigned through JES.
+
+    | **type**: int
+    | **sample**: 4
+
+  asid
+    The Address Space Identifier (ASID), which is a unique descriptor for the job address space. Zero if not active.
+
+    | **type**: int
+
+  queue_position
+    The position within the job queue where the job resides.
+
+    | **type**: int
+    | **sample**: 3
+
+  program_name
+    The name of the program found in the job's last completed step found in the PGM parameter.
+
+    | **type**: str
+    | **sample**: IEBGENER
+
+  ret_code
+    Return code output collected from job log.
+
+    | **type**: dict
+    | **sample**:
+
+      .. code-block:: json
+
+         {
+           "ret_code": {
+             "code": 0,
+             "msg": "CC 0000",
+             "msg_code": "0000",
+             "msg_txt": "",
+             "steps": [
+               {
+                 "step_cc": 0,
+                 "step_name": "STEP0001"
+               }
+             ]
+           }
+         }
+
+    msg
+      Return code or abend resulting from the job submission.
+
+      | **type**: str
+      | **sample**: CC 0000
+
+    msg_code
+      Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4".
+
+      | **type**: str
+      | **sample**: S0C4
+
+    msg_txt
+      Returns additional information related to the job.
+
+      | **type**: str
+      | **sample**: No job can be located with this job name: HELLO
+
+    code
+      Return code converted to integer value (when possible).
+
+      | **type**: int
+
+    steps
+      Series of JCL steps that were executed and their return codes.
+
+      | **type**: list
+      | **elements**: dict
+
+      step_name
+        Name of the step shown as "was executed" in the DD section.
+
+        | **type**: str
+        | **sample**: STEP0001
+
+      step_cc
+        The CC returned for this step in the DD section.
+
+        | **type**: int
+
+
+
+
+changed
+  Indicates if any changes were made during module operation.
+
+  | **returned**: on success
+  | **type**: bool
+
diff --git a/docs/source/modules/zos_job_query.rst b/docs/source/modules/zos_job_query.rst
new file mode 100644
index 0000000000..38cea61e34
--- /dev/null
+++ b/docs/source/modules/zos_job_query.rst
@@ -0,0 +1,389 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_job_query.py
+
+.. _zos_job_query_module:
+
+
+zos_job_query -- Query job status
+=================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- List z/OS job(s) and the current status of the job(s).
+- Uses job_name to filter the jobs by the job name.
+- Uses job_id to filter the jobs by the job identifier.
+- Uses owner to filter the jobs by the job owner.
+- Uses system to filter the jobs by system where the job is running (or ran) on.
+
+
+
+
+
+Parameters
+----------
+
+
+job_name
+  The job name to query.
+
+  A job name can be up to 8 characters long.
+
+  The *job_name* can include multiple wildcards.
+
+  The asterisk (`*`) wildcard will match zero or more specified characters.
+
+  Note that using this value will query the system for '*' and then return just matching values.
+
+  This may lead to security issues if there are read-access limitations on some users or jobs.
+
+  | **required**: False
+  | **type**: str
+  | **default**: *
+
+
+owner
+  Identifies the owner of the job.
+
+  If no owner is set, the default is 'none' and all jobs will be queried.
+
+  | **required**: False
+  | **type**: str
+
+
+job_id
+  The job id that has been assigned to the job.
+
+  A job id must begin with `STC`, `JOB`, or `TSU` and is followed by up to 5 digits.
+
+  When a job id is greater than 99,999, the job id format will begin with `S`, `J`, or `T`, followed by 7 digits.
+
+  The *job_id* can include multiple wildcards.
+
+  The asterisk (`*`) wildcard will match zero or more specified characters.
+
+  | **required**: False
+  | **type**: str
+
+
+
+
+Attributes
+----------
+action
+  | **support**: none
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: full
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Query a job with a job name of 'JOB12345'
+     zos_job_query:
+       job_name: "JOB12345"
+
+   - name: Query jobs using a wildcard to match any job id beginning with 'JOB12'
+     zos_job_query:
+       job_id: "JOB12*"
+
+   - name: Query jobs using wildcards to match any job name beginning with 'H' and ending in 'O'.
+     zos_job_query:
+       job_name: "H*O"
+
+   - name: Query jobs using wildcards to match a range of job id(s) that include 'JOB' and '014'.
+     zos_job_query:
+       job_id: JOB*014*
+
+   - name: Query all job names beginning with 'H' that match job id that includes '14'.
+     zos_job_query:
+       job_name: "H*"
+       job_id: "JOB*14*"
+
+   - name: Query all job names beginning with 'LINK' for owner 'ADMIN'.
+     zos_job_query:
+       job_name: "LINK*"
+       owner: ADMIN
+
+
+
+
+
+
+
+
+
+
+Return Values
+-------------
+
+
+changed
+  True if the state was changed, otherwise False.
+
+  | **returned**: always
+  | **type**: bool
+
+jobs
+  The output information for a list of jobs matching specified criteria. If no job status is found, this will return a ret_code dictionary with parameter msg_txt = The job could not be found.
+
+  | **returned**: success
+  | **type**: list
+  | **elements**: dict
+  | **sample**:
+
+    .. 
code-block:: json + + [ + { + "asid": 0, + "content_type": "JOB", + "cpu_time": 1414, + "creation_date": "2023-05-03", + "creation_time": "12:13:00", + "execution_node": "STL1", + "execution_time": "00:00:02", + "job_class": "K", + "job_id": "JOB01427", + "job_name": "LINKJOB", + "origin_node": "STL1", + "owner": "ADMIN", + "priority": 1, + "queue_position": 3, + "ret_code": "null", + "subsystem": "STL1", + "svc_class": "?", + "system": "STL1" + }, + { + "asid": 4, + "content_type": "JOB", + "cpu_time": 1414, + "creation_date": "2023-05-03", + "creation_time": "12:14:00", + "execution_node": "STL1", + "execution_time": "00:00:03", + "job_class": "A", + "job_id": "JOB16577", + "job_name": "LINKCBL", + "origin_node": "STL1", + "owner": "ADMIN", + "priority": 0, + "queue_position": 0, + "ret_code": { + "code": "null", + "msg": "CANCELED" + }, + "subsystem": "STL1", + "svc_class": "E", + "system": "STL1" + } + ] + + job_name + The name of the batch job. + + | **type**: str + | **sample**: LINKJOB + + owner + The owner who ran the job. + + | **type**: str + | **sample**: ADMIN + + job_id + Unique job identifier assigned to the job by JES. + + | **type**: str + | **sample**: JOB01427 + + content_type + Type of address space used by the job, can be one of the following types. + + APPC for an APPC Initiator. + + JGRP for a JOBGROUP. + + JOB for a Batch job. + + STC for a Started task. + + TSU for a Time sharing user. + + \? for an unknown or pending job. + + | **type**: str + | **sample**: STC + + system + The job entry system that MVS uses to do work. + + | **type**: str + | **sample**: STL1 + + subsystem + The job entry subsystem that MVS uses to do work. + + | **type**: str + | **sample**: STL1 + + cpu_time + Sum of the CPU time used by each job step, in microseconds. + + | **type**: int + | **sample**: 5 + + execution_node + Execution node that picked the job and executed it. + + | **type**: str + | **sample**: STL1 + + origin_node + Origin node that submitted the job. + + | **type**: str + | **sample**: STL1 + + ret_code + Return code output collected from job log. + + | **type**: dict + | **sample**: + + .. code-block:: json + + { + "ret_code": { + "code": 0, + "msg": "CC 0000", + "msg_code": "0000", + "msg_txt": "", + "steps": [ + { + "step_cc": 0, + "step_name": "STEP0001" + } + ] + } + } + + msg + Return code or abend resulting from the job submission. + + | **type**: str + | **sample**: CC 0000 + + msg_code + Return code extracted from the `msg` so that it can be evaluated. For example, ABEND(S0C4) would yield "S0C4". + + | **type**: str + | **sample**: S0C4 + + msg_txt + Returns additional information related to the job. + + | **type**: str + | **sample**: No job can be located with this job name: HELLO + + code + Return code converted to integer value (when possible). + + | **type**: int + + steps + Series of JCL steps that were executed and their return codes. + + | **type**: list + | **elements**: dict + + step_name + Name of the step shown as "was executed" in the DD section. + + | **type**: str + | **sample**: STEP0001 + + step_cc + The CC returned for this step in the DD section. + + | **type**: int + + + + job_class + Job class for this job. + + | **type**: str + | **sample**: A + + svc_class + Service class for this job. + + | **type**: str + | **sample**: C + + priority + A numeric indicator of the job priority assigned through JES. + + | **type**: int + | **sample**: 4 + + asid + The address Space Identifier (ASID) that is a unique descriptor for the job address space. 
Zero if not active.
+
+    | **type**: int
+
+  creation_date
+    Date, local to the target system, when the job was created.
+
+    | **type**: str
+    | **sample**: 2023-05-04
+
+  creation_time
+    Time, local to the target system, when the job was created.
+
+    | **type**: str
+    | **sample**: 14:15:00
+
+  queue_position
+    The position within the job queue where the job resides.
+
+    | **type**: int
+    | **sample**: 3
+
+  program_name
+    The name of the program found in the job's last completed step found in the PGM parameter.
+
+    | **type**: str
+    | **sample**: IEBGENER
+
+  execution_time
+    Total duration time of the job execution, if it has finished. If the job is still running, it represents the time elapsed from the job execution start and current time.
+
+    | **type**: str
+    | **sample**: 00:00:10
+
+
+message
+  Message returned on failure.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: {'msg': 'List FAILED! no such job been found: IYK3Z0R9'}
+
diff --git a/docs/source/modules/zos_job_submit.rst b/docs/source/modules/zos_job_submit.rst
new file mode 100644
index 0000000000..6d31b6abd0
--- /dev/null
+++ b/docs/source/modules/zos_job_submit.rst
@@ -0,0 +1,895 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_job_submit.py
+
+.. _zos_job_submit_module:
+
+
+zos_job_submit -- Submit JCL
+============================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Submit JCL in a data set, USS file, or file on the controller.
+- Submit a job and monitor for completion.
+- For an uncataloged data set, specify the volume serial number.
+
+
+
+
+
+Parameters
+----------
+
+
+src
+  The source file or data set containing the JCL to submit.
+
+  It could be a physical sequential data set, a partitioned data set qualified by a member or a path (e.g. ``USER.TEST``, ``USER.JCL(TEST)``), or a generation data set from a generation data group (for example, ``USER.TEST.GDG(-2)``).
+
+  Or a USS file (e.g. ``/u/tester/demo/sample.jcl``).
+
+  Or a local file on the Ansible control node (e.g. ``/User/tester/ansible-playbook/sample.jcl``).
+
+  When using a generation data set, only already created generations are valid. If either the relative name is positive, or negative but not found, the module will fail.
+
+  | **required**: True
+  | **type**: str
+
+
+location
+  The JCL location. Supported choices are ``data_set``, ``uss`` or ``local``.
+
+  ``data_set`` can be a PDS, PDSE, sequential data set, or a generation data set.
+
+  ``uss`` means the JCL location is located in UNIX System Services (USS).
+
+  ``local`` means locally to the Ansible control node.
+
+  | **required**: False
+  | **type**: str
+  | **default**: data_set
+  | **choices**: data_set, uss, local
+
+
+wait_time_s
+  Option *wait_time_s* is the total time that module `zos_job_submit <./zos_job_submit.html>`_ will wait for a submitted job to complete. The time begins when the module is executed on the managed node.
+
+  *wait_time_s* is measured in seconds and must be a value greater than 0 and less than 86400.
+
+  The module can submit and forget jobs by setting *wait_time_s* to 0. This way the module will not try to retrieve the job details other than job id. Job details and contents can be retrieved later by using `zos_job_query <./zos_job_query.html>`_ or `zos_job_output <./zos_job_output.html>`_ if needed.
+
+  | **required**: False
+  | **type**: int
+  | **default**: 10
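+
+  An illustrative sketch (not one of the module's shipped examples) of the
+  submit-and-forget pattern described above; the data set name is hypothetical:
+
+  .. code-block:: yaml+jinja
+
+     - name: Submit a job without waiting for completion
+       zos_job_submit:
+         src: HLQ.DATA.LLQ(LONGRUN)
+         location: data_set
+         wait_time_s: 0
+       register: submit_result
+
+     - name: Check on the job later using the returned job id
+       zos_job_query:
+         job_id: "{{ submit_result.jobs[0].job_id }}"
+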
+
+
+max_rc
+  Specifies the maximum return code allowed for any job step for the submitted job.
+
+  | **required**: False
+  | **type**: int
+
+
+return_output
+  Whether to print the DD output.
+
+  If false, an empty list will be returned in the ddnames field.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: True
+
+
+volume
+  The volume serial (VOLSER) is where the data set resides. The option is required only when the data set is not cataloged on the system.
+
+  When configured, the `zos_job_submit <./zos_job_submit.html>`_ will try to catalog the data set for the volume serial. If it is not able to, the module will fail.
+
+  Ignored for *location=uss* and *location=local*.
+
+  | **required**: False
+  | **type**: str
+
+
+encoding
+  Specifies which encoding the local JCL file should be converted from and to, before submitting the job.
+
+  This option is only supported when *location=local*.
+
+  If this parameter is not provided and the z/OS system's default encoding cannot be identified, the JCL file will be converted from UTF-8 to IBM-1047 by default; otherwise, the module will detect and use the z/OS system encoding.
+
+  | **required**: False
+  | **type**: dict
+
+
+  from
+    The character set of the local JCL file; defaults to UTF-8.
+
+    Supported character sets rely on the target platform; the most common character sets are supported.
+
+    | **required**: False
+    | **type**: str
+    | **default**: UTF-8
+
+
+  to
+    The character set to convert the local JCL file to on the remote z/OS system; defaults to IBM-1047 when the z/OS system's default encoding cannot be identified.
+
+    If not provided, the module will attempt to identify and use the default encoding on the z/OS system.
+
+    Supported character sets rely on the target version; the most common character sets are supported.
+
+    | **required**: False
+    | **type**: str
+    | **default**: IBM-1047
+
+
+
+use_template
+  Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module.
+
+  Only valid when ``src`` is a local file or directory.
+
+  All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc.
+
+  If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
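+
+  An illustrative sketch (not one of the module's shipped examples) of rendering
+  a local Jinja2 JCL template before submission; the template path and the
+  ``job_name`` variable are hypothetical, and *template_parameters* is described
+  below:
+
+  .. code-block:: yaml+jinja
+
+     - name: Render a local Jinja2 JCL template and submit the resulting job
+       zos_job_submit:
+         src: /u/tester/templates/sample.j2
+         location: local
+         use_template: true
+       vars:
+         job_name: HELLOJOB
+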
+
+
+template_parameters
+  Options to set the way Jinja2 will process templates.
+
+  Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_.
+
+  These options are ignored unless ``use_template`` is true.
+
+  | **required**: False
+  | **type**: dict
+
+
+  variable_start_string
+    Marker for the beginning of a statement to print a variable in Jinja2.
+
+    | **required**: False
+    | **type**: str
+    | **default**: {{
+
+
+  variable_end_string
+    Marker for the end of a statement to print a variable in Jinja2.
+
+    | **required**: False
+    | **type**: str
+    | **default**: }}
+
+
+  block_start_string
+    Marker for the beginning of a block in Jinja2.
+
+    | **required**: False
+    | **type**: str
+    | **default**: {%
+
+
+  block_end_string
+    Marker for the end of a block in Jinja2.
+
+    | **required**: False
+    | **type**: str
+    | **default**: %}
+
+
+  comment_start_string
+    Marker for the beginning of a comment in Jinja2.
+
+    | **required**: False
+    | **type**: str
+    | **default**: {#
+
+
+  comment_end_string
+    Marker for the end of a comment in Jinja2.
+
+    | **required**: False
+    | **type**: str
+    | **default**: #}
+
+
+  line_statement_prefix
+    Prefix used by Jinja2 to identify line-based statements.
+
+    | **required**: False
+    | **type**: str
+
+
+  line_comment_prefix
+    Prefix used by Jinja2 to identify comment lines.
+
+    | **required**: False
+    | **type**: str
+
+
+  lstrip_blocks
+    Whether Jinja2 should strip leading spaces from the start of a line to a block.
+
+    | **required**: False
+    | **type**: bool
+    | **default**: False
+
+
+  trim_blocks
+    Whether Jinja2 should remove the first newline after a block is removed.
+
+    Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets.
+
+    | **required**: False
+    | **type**: bool
+    | **default**: True
+
+
+  keep_trailing_newline
+    Whether Jinja2 should keep the first trailing newline at the end of a template after rendering.
+
+    | **required**: False
+    | **type**: bool
+    | **default**: False
+
+
+  newline_sequence
+    Sequence that starts a newline in a template.
+
+    | **required**: False
+    | **type**: str
+    | **default**: \\n
+    | **choices**: \\n, \\r, \\r\\n
+
+
+  auto_reload
+    Whether to reload a template file when it has changed after the task has started.
+
+    | **required**: False
+    | **type**: bool
+    | **default**: False
+
+
+  autoescape
+    Whether to enable autoescape of XML/HTML elements on a template.
+
+    | **required**: False
+    | **type**: bool
+    | **default**: True
+
+
+
+
+
+Attributes
+----------
+action
+  | **support**: full
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: full
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Submit JCL in a PDSE member.
+     zos_job_submit:
+       src: HLQ.DATA.LLQ(SAMPLE)
+       location: data_set
+     register: response
+
+   - name: Submit JCL in USS with no DDs in the output.
+     zos_job_submit:
+       src: /u/tester/demo/sample.jcl
+       location: uss
+       return_output: false
+
+   - name: Convert local JCL to IBM-037 and submit the job.
+     zos_job_submit:
+       src: /Users/maxy/ansible-playbooks/provision/sample.jcl
+       location: local
+       encoding:
+         from: ISO8859-1
+         to: IBM-037
+
+   - name: Submit JCL in an uncataloged PDSE on volume P2SS01.
+     zos_job_submit:
+       src: HLQ.DATA.LLQ(SAMPLE)
+       location: data_set
+       volume: P2SS01
+
+   - name: Submit a long running PDS job and wait up to 30 seconds for completion.
+     zos_job_submit:
+       src: HLQ.DATA.LLQ(LONGRUN)
+       location: data_set
+       wait_time_s: 30
+
+   - name: Submit JCL and set the max return code the module should fail on to 16.
+     zos_job_submit:
+       src: HLQ.DATA.LLQ
+       location: data_set
+       max_rc: 16
+
+   - name: Submit JCL from the latest generation data set in a generation data group.
+ zos_job_submit: + src: HLQ.DATA.GDG(0) + location: data_set + + - name: Submit JCL from a previous generation data set in a generation data group. + zos_job_submit: + src: HLQ.DATA.GDG(-2) + location: data_set + + + + +Notes +----- + +.. note:: + For supported character sets used to encode data, refer to the `documentation `_. + + This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + + + + + + + +Return Values +------------- + + +jobs + List of jobs output. If no job status is found, this will return an empty ret_code with msg_txt explanation. + + | **returned**: success + | **type**: list + | **elements**: dict + | **sample**: + + .. code-block:: json + + [ + { + "asid": 0, + "class": "K", + "content_type": "JOB", + "cpu_time": 1, + "creation_date": "2023-05-03", + "creation_time": "12:13:00", + "ddnames": [ + { + "byte_count": "677", + "content": [ + "1 J E S 2 J O B L O G -- S Y S T E M S T L 1 -- N O D E S T L 1 ", + "0 ", + " 12.50.08 JOB00361 ---- FRIDAY, 13 MAR 2020 ----", + " 12.50.08 JOB00361 IRR010I USERID OMVSADM IS ASSIGNED TO THIS JOB.", + " 12.50.08 JOB00361 ICH70001I OMVSADM LAST ACCESS AT 12:50:03 ON FRIDAY, MARCH 13, 2020", + " 12.50.08 JOB00361 $HASP373 DBDGEN00 STARTED - INIT 15 - CLASS K - SYS STL1", + " 12.50.08 JOB00361 SMF000I DBDGEN00 C ASMA90 0000", + " 12.50.09 JOB00361 SMF000I DBDGEN00 L IEWL 0000", + " 12.50.09 JOB00361 $HASP395 DBDGEN00 ENDED - RC=0000", + "0------ JES2 JOB STATISTICS ------", + "- 13 MAR 2020 JOB EXECUTION DATE", + "- 28 CARDS READ", + "- 158 SYSOUT PRINT RECORDS", + "- 0 SYSOUT PUNCH RECORDS", + "- 12 SYSOUT SPOOL KBYTES", + "- 0.00 MINUTES EXECUTION TIME" + ], + "ddname": "JESMSGLG", + "id": "2", + "procstep": "", + "record_count": "16", + "stepname": "JES2" + }, + { + "byte_count": "2136", + "content": [ + " 1 //DBDGEN00 JOB MSGLEVEL=1,MSGCLASS=E,CLASS=K, JOB00361", + " // LINES=999999,TIME=1440,REGION=0M, ", + " // MEMLIMIT=NOLIMIT ", + " 2 /*JOBPARM SYSAFF=* ", + " //* ", + " 3 //DBDGEN PROC MBR=TEMPNAME ", + " //C EXEC PGM=ASMA90, ", + " // PARM=\u0027OBJECT,NODECK,NOLIST\u0027 ", + " //SYSLIB DD DISP=SHR, ", + " // DSN=IMSBLD.I15RTSMM.SDFSMAC ", + " //SYSLIN DD DISP=(NEW,PASS),RECFM=F,LRECL=80,BLKSIZE=80, ", + " // UNIT=SYSDA,SPACE=(CYL,(10,5),RLSE,,) ", + " //SYSUT1 DD DISP=(NEW,DELETE),UNIT=SYSDA,SPACE=(CYL, ", + " // (10,5),,,) ", + " //SYSPRINT DD SYSOUT=* ", + " //L EXEC PGM=IEWL, ", + " // PARM=\u0027XREF,NOLIST\u0027, ", + " // COND=(0,LT,C) ", + " //SYSLMOD DD DISP=SHR, ", + " // DSN=IMSTESTL.IMS1.DBDLIB(\u0026MBR) ", + " //SYSLIN DD DSN=*.C.SYSLIN,DISP=(OLD,DELETE) ", + " //SYSPRINT DD SYSOUT=* ", + " //* ", + " // PEND ", + " 4 //DLORD6 EXEC DBDGEN, ", + " // MBR=DLORD6 ", + " 5 ++DBDGEN PROC MBR=TEMPNAME ", + " 6 ++C EXEC PGM=ASMA90, ", + " ++ PARM=\u0027OBJECT,NODECK,NOLIST\u0027 ", + " 7 ++SYSLIB DD DISP=SHR, ", + " ++ DSN=IMSBLD.I15RTSMM.SDFSMAC ", + " 8 ++SYSLIN DD DISP=(NEW,PASS),RECFM=F,LRECL=80,BLKSIZE=80, ", + " ++ UNIT=SYSDA,SPACE=(CYL,(10,5),RLSE,,) ", + " 9 ++SYSUT1 DD DISP=(NEW,DELETE),UNIT=SYSDA,SPACE=(CYL, ", + " ++ (10,5),,,) ", + " 10 ++SYSPRINT DD SYSOUT=* ", + " 11 //SYSIN DD DISP=SHR, ", + 
" // DSN=IMSTESTL.IMS1.DBDSRC(DLORD6) ", + " 12 ++L EXEC PGM=IEWL, ", + " ++ PARM=\u0027XREF,NOLIST\u0027, ", + " ++ COND=(0,LT,C) ", + " 13 ++SYSLMOD DD DISP=SHR, ", + " ++ DSN=IMSTESTL.IMS1.DBDLIB(\u0026MBR) ", + " IEFC653I SUBSTITUTION JCL - DISP=SHR,DSN=IMSTESTL.IMS1.DBDLIB(DLORD6)", + " 14 ++SYSLIN DD DSN=*.C.SYSLIN,DISP=(OLD,DELETE) ", + " 15 ++SYSPRINT DD SYSOUT=* ", + " ++* " + ], + "ddname": "JESJCL", + "id": "3", + "procstep": "", + "record_count": "47", + "stepname": "JES2" + }, + { + "byte_count": "2414", + "content": [ + " STMT NO. MESSAGE", + " 4 IEFC001I PROCEDURE DBDGEN WAS EXPANDED USING INSTREAM PROCEDURE DEFINITION", + " ICH70001I OMVSADM LAST ACCESS AT 12:50:03 ON FRIDAY, MARCH 13, 2020", + " IEF236I ALLOC. FOR DBDGEN00 C DLORD6", + " IEF237I 083C ALLOCATED TO SYSLIB", + " IGD100I 0940 ALLOCATED TO DDNAME SYSLIN DATACLAS ( )", + " IGD100I 0942 ALLOCATED TO DDNAME SYSUT1 DATACLAS ( )", + " IEF237I JES2 ALLOCATED TO SYSPRINT", + " IEF237I 01A0 ALLOCATED TO SYSIN", + " IEF142I DBDGEN00 C DLORD6 - STEP WAS EXECUTED - COND CODE 0000", + " IEF285I IMSBLD.I15RTSMM.SDFSMAC KEPT ", + " IEF285I VOL SER NOS= IMSBG2. ", + " IEF285I SYS20073.T125008.RA000.DBDGEN00.R0101894 PASSED ", + " IEF285I VOL SER NOS= 000000. ", + " IEF285I SYS20073.T125008.RA000.DBDGEN00.R0101895 DELETED ", + " IEF285I VOL SER NOS= 333333. ", + " IEF285I OMVSADM.DBDGEN00.JOB00361.D0000101.? SYSOUT ", + " IEF285I IMSTESTL.IMS1.DBDSRC KEPT ", + " IEF285I VOL SER NOS= USER03. ", + " IEF373I STEP/C /START 2020073.1250", + " IEF032I STEP/C /STOP 2020073.1250 ", + " CPU: 0 HR 00 MIN 00.03 SEC SRB: 0 HR 00 MIN 00.00 SEC ", + " VIRT: 252K SYS: 240K EXT: 1876480K SYS: 11896K", + " ATB- REAL: 1048K SLOTS: 0K", + " VIRT- ALLOC: 14M SHRD: 0M", + " IEF236I ALLOC. FOR DBDGEN00 L DLORD6", + " IEF237I 01A0 ALLOCATED TO SYSLMOD", + " IEF237I 0940 ALLOCATED TO SYSLIN", + " IEF237I JES2 ALLOCATED TO SYSPRINT", + " IEF142I DBDGEN00 L DLORD6 - STEP WAS EXECUTED - COND CODE 0000", + " IEF285I IMSTESTL.IMS1.DBDLIB KEPT ", + " IEF285I VOL SER NOS= USER03. ", + " IEF285I SYS20073.T125008.RA000.DBDGEN00.R0101894 DELETED ", + " IEF285I VOL SER NOS= 000000. ", + " IEF285I OMVSADM.DBDGEN00.JOB00361.D0000102.? SYSOUT ", + " IEF373I STEP/L /START 2020073.1250", + " IEF032I STEP/L /STOP 2020073.1250 ", + " CPU: 0 HR 00 MIN 00.00 SEC SRB: 0 HR 00 MIN 00.00 SEC ", + " VIRT: 92K SYS: 256K EXT: 1768K SYS: 11740K", + " ATB- REAL: 1036K SLOTS: 0K", + " VIRT- ALLOC: 11M SHRD: 0M", + " IEF375I JOB/DBDGEN00/START 2020073.1250", + " IEF033I JOB/DBDGEN00/STOP 2020073.1250 ", + " CPU: 0 HR 00 MIN 00.03 SEC SRB: 0 HR 00 MIN 00.00 SEC " + ], + "ddname": "JESYSMSG", + "id": "4", + "procstep": "", + "record_count": "44", + "stepname": "JES2" + }, + { + "byte_count": "1896", + "content": [ + "1z/OS V2 R2 BINDER 12:50:08 FRIDAY MARCH 13, 2020 ", + " BATCH EMULATOR JOB(DBDGEN00) STEP(DLORD6 ) PGM= IEWL PROCEDURE(L ) ", + " IEW2278I B352 INVOCATION PARAMETERS - XREF,NOLIST ", + " IEW2650I 5102 MODULE ENTRY NOT PROVIDED. ENTRY DEFAULTS TO SECTION DLORD6. 
", + " ", + " ", + "1 C R O S S - R E F E R E N C E T A B L E ", + " _________________________________________ ", + " ", + " TEXT CLASS = B_TEXT ", + " ", + " --------------- R E F E R E N C E -------------------------- T A R G E T -------------------------------------------", + " CLASS ELEMENT | ELEMENT |", + " OFFSET SECT/PART(ABBREV) OFFSET TYPE | SYMBOL(ABBREV) SECTION (ABBREV) OFFSET CLASS NAME |", + " | |", + " *** E N D O F C R O S S R E F E R E N C E *** ", + "1z/OS V2 R2 BINDER 12:50:08 FRIDAY MARCH 13, 2020 ", + " BATCH EMULATOR JOB(DBDGEN00) STEP(DLORD6 ) PGM= IEWL PROCEDURE(L ) ", + " IEW2850I F920 DLORD6 HAS BEEN SAVED WITH AMODE 24 AND RMODE 24. ENTRY POINT NAME IS DLORD6. ", + " IEW2231I 0481 END OF SAVE PROCESSING. ", + " IEW2008I 0F03 PROCESSING COMPLETED. RETURN CODE = 0. ", + " ", + " ", + " ", + "1---------------------- ", + " MESSAGE SUMMARY REPORT ", + " ---------------------- ", + " TERMINAL MESSAGES (SEVERITY = 16) ", + " NONE ", + " ", + " SEVERE MESSAGES (SEVERITY = 12) ", + " NONE ", + " ", + " ERROR MESSAGES (SEVERITY = 08) ", + " NONE ", + " ", + " WARNING MESSAGES (SEVERITY = 04) ", + " NONE ", + " ", + " INFORMATIONAL MESSAGES (SEVERITY = 00) ", + " 2008 2231 2278 2650 2850 ", + " ", + " ", + " **** END OF MESSAGE SUMMARY REPORT **** ", + " " + ], + "ddname": "SYSPRINT", + "id": "102", + "procstep": "L", + "record_count": "45", + "stepname": "DLORD6" + } + ], + "execution_node": "STL1", + "execution_time": "00:00:10", + "job_class": "K", + "job_id": "JOB00361", + "job_name": "DBDGEN00", + "origin_node": "STL1", + "owner": "OMVSADM", + "priority": 1, + "program_name": "IEBGENER", + "queue_position": 3, + "ret_code": { + "code": 0, + "msg": "CC 0000", + "msg_code": "0000", + "msg_txt": "", + "steps": [ + { + "step_cc": 0, + "step_name": "DLORD6" + } + ] + }, + "subsystem": "STL1", + "svc_class": "?", + "system": "STL1" + } + ] + + job_id + The z/OS job ID of the job containing the spool file. + + | **type**: str + | **sample**: JOB00134 + + job_name + The name of the batch job. + + | **type**: str + | **sample**: HELLO + + content_type + Type of address space used by the job, can be one of the following types. + + APPC for an APPC Initiator. + + JGRP for a JOBGROUP. + + JOB for a Batch job. + + STC for a Started task. + + TSU for a Time sharing user. + + \? for an unknown or pending job. + + | **type**: str + | **sample**: STC + + duration + The total lapsed time the JCL ran for. + + | **type**: int + + execution_time + Total duration time of the job execution, if it has finished. + + | **type**: str + | **sample**: 00:00:10 + + ddnames + Data definition names. + + | **type**: list + | **elements**: dict + + ddname + Data definition name. + + | **type**: str + | **sample**: JESMSGLG + + record_count + Count of the number of lines in a print data set. + + | **type**: int + | **sample**: 17 + + id + The file ID. + + | **type**: str + | **sample**: 2 + + stepname + A step name is name that identifies the job step so that other JCL statements or the operating system can refer to it. + + | **type**: str + | **sample**: JES2 + + procstep + Identifies the set of statements inside JCL grouped together to perform a particular function. + + | **type**: str + | **sample**: PROC1 + + byte_count + Byte size in a print data set. + + | **type**: int + | **sample**: 574 + + content + The ddname content. + + | **type**: list + | **elements**: str + | **sample**: + + .. 
code-block:: json
+
+           [
+               " 1 //HELLO JOB (T043JM,JM00,1,0,0,0),\u0027HELLO WORLD - JRM\u0027,CLASS=R, JOB00134",
+               " // MSGCLASS=X,MSGLEVEL=1,NOTIFY=S0JM ",
+               " //* ",
+               " //* PRINT \"HELLO WORLD\" ON JOB OUTPUT ",
+               " //* ",
+               " //* NOTE THAT THE EXCLAMATION POINT IS INVALID EBCDIC FOR JCL ",
+               " //* AND WILL CAUSE A JCL ERROR ",
+               " //* ",
+               " 2 //STEP0001 EXEC PGM=IEBGENER ",
+               " 3 //SYSIN DD DUMMY ",
+               " 4 //SYSPRINT DD SYSOUT=* ",
+               " 5 //SYSUT1 DD * ",
+               " 6 //SYSUT2 DD SYSOUT=* ",
+               " 7 // "
+           ]
+
+
+  ret_code
+    Return code output collected from the job log.
+
+    | **type**: dict
+    | **sample**:
+
+      .. code-block:: json
+
+         {
+           "ret_code": {
+             "code": 0,
+             "msg": "CC 0000",
+             "msg_code": "0000",
+             "msg_txt": "",
+             "steps": [
+               {
+                 "step_cc": 0,
+                 "step_name": "STEP0001"
+               }
+             ]
+           }
+         }
+
+    msg
+      Job status resulting from the job submission.
+
+      Job status `ABEND` indicates the job ended abnormally.
+
+      Job status `AC` indicates the job is active, often a started task or job taking long.
+
+      Job status `CAB` indicates a converter abend.
+
+      Job status `CANCELED` indicates the job was canceled.
+
+      Job status `CNV` indicates a converter error.
+
+      Job status `FLU` indicates the job was flushed.
+
+      Job status `JCLERR` or `JCL ERROR` indicates the JCL has an error.
+
+      Job status `SEC` or `SEC ERROR` indicates the job has encountered a security error.
+
+      Job status `SYS` indicates a system failure.
+
+      Job status `?` indicates status cannot be determined.
+
+      Job status `TYPRUN=SCAN` indicates that the job had the TYPRUN parameter with SCAN option.
+
+      Job status `TYPRUN=COPY` indicates that the job had the TYPRUN parameter with COPY option.
+
+      Job status `HOLD` indicates that the job had the TYPRUN parameter with either the HOLD or JCLHOLD options.
+
+      Jobs where status cannot be determined will result in None (NULL).
+
+      | **type**: str
+      | **sample**: AC
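+
+      An illustrative sketch (not part of the module's shipped documentation) of
+      acting on this status in a playbook; the source data set is hypothetical:
+
+      .. code-block:: yaml+jinja
+
+         - name: Submit a job and fail the task unless it completed with CC 0000
+           zos_job_submit:
+             src: HLQ.DATA.LLQ(SAMPLE)
+             location: data_set
+           register: result
+           failed_when: result.jobs[0].ret_code.msg != 'CC 0000'
+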
+ + | **type**: str + | **sample**: 2023-05-04 + + creation_time + Time, local to the target system, when the job was created. + + | **type**: str + | **sample**: 14:15:00 + + queue_position + The position within the job queue where the jobs resides. + + | **type**: int + | **sample**: 3 + + program_name + The name of the program found in the job's last completed step found in the PGM parameter. + + | **type**: str + | **sample**: IEBGENER + + system + The job entry system that MVS uses to do work. + + | **type**: str + | **sample**: STL1 + + subsystem + The job entry subsystem that MVS uses to do work. + + | **type**: str + | **sample**: STL1 + + cpu_time + Sum of the CPU time used by each job step, in microseconds. + + | **type**: int + | **sample**: 5 + + execution_node + Execution node that picked the job and executed it. + + | **type**: str + | **sample**: STL1 + + origin_node + Origin node that submitted the job. + + | **type**: str + | **sample**: STL1 + + diff --git a/docs/source/modules/zos_lineinfile.rst b/docs/source/modules/zos_lineinfile.rst new file mode 100644 index 0000000000..3ed1a1e339 --- /dev/null +++ b/docs/source/modules/zos_lineinfile.rst @@ -0,0 +1,351 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_lineinfile.py + +.. _zos_lineinfile_module: + + +zos_lineinfile -- Manage textual data on z/OS +============================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Manage lines in z/OS UNIX System Services (USS) files, PS (sequential data set), PDS, PDSE, or member of a PDS or PDSE. +- This module ensures a particular line is in a USS file or data set, or replace an existing line using a back-referenced regular expression. +- This is primarily useful when you want to change a single line in a USS file or data set only. + + + + + +Parameters +---------- + + +src + The location can be a UNIX System Services (USS) file, PS (sequential data set), member of a PDS or PDSE, PDS, PDSE. + + The USS file must be an absolute pathname. + + Generation data set (GDS) relative name of generation already created. e.g. *SOME.CREATION(-1*). + + | **required**: True + | **type**: str + + +regexp + The regular expression to look for in every line of the USS file or data set. + + For ``state=present``, the pattern to replace if found. Only the last line found will be replaced. + + For ``state=absent``, the pattern of the line(s) to remove. + + If the regular expression is not matched, the line will be added to the USS file or data set in keeping with ``insertbefore`` or ``insertafter`` settings. + + When modifying a line the regexp should typically match both the initial state of the line as well as its state after replacement by ``line`` to ensure idempotence. + + | **required**: False + | **type**: str + + +state + Whether the line should be inserted/replaced(present) or removed(absent). + + | **required**: False + | **type**: str + | **default**: present + | **choices**: absent, present + + +line + The line to insert/replace into the USS file or data set. + + Required for ``state=present``. + + If ``backrefs`` is set, may contain backreferences that will get expanded with the ``regexp`` capture groups if the regexp matches. + + | **required**: False + | **type**: str + + +backrefs + Used with ``state=present``. + + If set, ``line`` can contain backreferences (both positional and named) that will get populated if the ``regexp`` matches. 
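+
+  As an illustrative sketch (the file path, pattern, and value are invented), a positional backreference substitution might look like:
+
+  .. code-block:: yaml+jinja
+
+     - name: Update a setting while keeping the original keyword
+       zos_lineinfile:
+         src: /tmp/src/somefile
+         regexp: '^(SETTING=)\S+$'
+         line: '\1new_value'
+         backrefs: true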
+
+  This parameter changes the operation of the module slightly; ``insertbefore`` and ``insertafter`` will be ignored, and if the ``regexp`` does not match anywhere in the USS file or data set, the USS file or data set will be left unchanged.
+
+  If the ``regexp`` does match, the last matching line will be replaced by the expanded line parameter.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+insertafter
+  Used with ``state=present``.
+
+  If specified, the line will be inserted after the last match of the specified regular expression.
+
+  If the first match is required, use ``firstmatch=yes``.
+
+  A special value, ``EOF``, is available for inserting the line at the end of the USS file or data set.
+
+  If the specified regular expression has no matches, ``EOF`` will be used instead.
+
+  If ``insertbefore`` is set, the default value ``EOF`` will be ignored.
+
+  If regular expressions are passed to both ``regexp`` and ``insertafter``, ``insertafter`` is only honored if no match for ``regexp`` is found.
+
+  May not be used with ``backrefs`` or ``insertbefore``.
+
+  Choices are ``EOF`` or '*regex*'.
+
+  Default is ``EOF``.
+
+  | **required**: False
+  | **type**: str
+
+
+insertbefore
+  Used with ``state=present``.
+
+  If specified, the line will be inserted before the last match of the specified regular expression.
+
+  If the first match is required, use ``firstmatch=yes``.
+
+  A special value, ``BOF``, is available for inserting the line at the beginning of the USS file or data set.
+
+  If the specified regular expression has no matches, the line will be inserted at the end of the USS file or data set.
+
+  If regular expressions are passed to both ``regexp`` and ``insertbefore``, ``insertbefore`` is only honored if no match for ``regexp`` is found.
+
+  May not be used with ``backrefs`` or ``insertafter``.
+
+  Choices are ``BOF`` or '*regex*'.
+
+  | **required**: False
+  | **type**: str
+
+
+backup
+  Creates a backup file or backup data set for *src*, including the timestamp information to ensure that you retrieve the original file.
+
+  *backup_name* can be used to specify a backup file name if *backup=true*.
+
+  The backup file name will be returned on either success or failure of module execution such that data can be retrieved.
+
+  Use a generation data set (GDS) relative positive name, e.g. *SOME.CREATION(+1)*.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+backup_name
+  Specify the USS file name or data set name for the destination backup.
+
+  If the source *src* is a USS file or path, the backup_name must be a file or path name, and the USS file or path must be an absolute path name.
+
+  If the source is an MVS data set, the backup_name must be an MVS data set name.
+
+  If the backup_name is not provided, the default backup_name will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``.
+
+  If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set can be recovered by renaming it.
+
+  | **required**: False
+  | **type**: str
+
+
+tmp_hlq
+  Override the default high level qualifier (HLQ) for temporary and backup data sets.
+
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used.
+
+  | **required**: False
+  | **type**: str
+
+
+firstmatch
+  Used with ``insertafter`` or ``insertbefore``.
+ + If set, ``insertafter`` and ``insertbefore`` will work with the first line that matches the given regular expression. + + | **required**: False + | **type**: bool + | **default**: False + + +encoding + The character set of the source *src*. `zos_lineinfile <./zos_lineinfile.html>`_ requires to be provided with correct encoding to read the content of USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + + Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. + + | **required**: False + | **type**: str + | **default**: IBM-1047 + + +force + Specifies that the data set can be shared with others during an update which results in the data set you are updating to be simultaneously updated by others. + + This is helpful when a data set is being used in a long running process such as a started task and you are wanting to update or read. + + The ``force`` option enables sharing of data sets through the disposition *DISP=SHR*. + + | **required**: False + | **type**: bool + | **default**: False + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Ensure value of a variable in the sequential data set + zos_lineinfile: + src: SOME.DATA.SET + regexp: '^VAR=' + line: VAR="some value" + + - name: Remove all comments in the USS file + zos_lineinfile: + src: /tmp/src/somefile + state: absent + regexp: '^#' + + - name: Ensure the https port is 8080 + zos_lineinfile: + src: /tmp/src/somefile + regexp: '^Listen ' + insertafter: '^#Listen ' + line: Listen 8080 + + - name: Ensure we have our own comment added to the partitioned data set member + zos_lineinfile: + src: SOME.PARTITIONED.DATA.SET(DATA) + regexp: '#^VAR=' + insertbefore: '^VAR=' + line: '# VAR default value' + + - name: Ensure the user working directory for liberty is set as needed + zos_lineinfile: + src: /tmp/src/somefile + regexp: '^(.*)User(\d+)m(.*)$' + line: '\1APPUser\3' + backrefs: true + + - name: Add a line to a member while a task is in execution + zos_lineinfile: + src: SOME.PARTITIONED.DATA.SET(DATA) + insertafter: EOF + line: 'Should be a working test now' + force: true + + - name: Add a line to a gds + zos_lineinfile: + src: SOME.CREATION(-2) + insertafter: EOF + line: 'Should be a working test now' + + - name: Add a line to dataset and backup in a new generation of gds + zos_lineinfile: + src: SOME.CREATION.TEST + insertafter: EOF + backup: true + backup_name: CREATION.GDS(+1) + line: 'Should be a working test now' + + + + +Notes +----- + +.. note:: + It is the playbook author or user's responsibility to avoid files that should not be encoded, such as binary files. A user is described as the remote user, configured either for the playbook or playbook tasks, who can also obtain escalated privileges to execute as root or another user. + + All data sets are always assumed to be cataloged. If an uncataloged data set needs to be encoded, it should be cataloged first. 
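+
+   As a minimal, hedged sketch, an uncataloged data set could be cataloged first with `zos_data_set <./zos_data_set.html>`_ (the data set and volume names are invented):
+
+   .. code-block:: yaml+jinja
+
+      - name: Catalog the data set before editing it
+        zos_data_set:
+          name: SOME.UNCATLG.DATA
+          state: cataloged
+          volumes: "222222"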
+ + For supported character sets used to encode data, refer to the `documentation `_. + + + + + + + +Return Values +------------- + + +changed + Indicates if the source was modified. Value of 1 represents `true`, otherwise `false`. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + 1 + +found + Number of the matching patterns + + | **returned**: success + | **type**: int + | **sample**: 5 + +cmd + constructed dsed shell cmd based on the parameters + + | **returned**: success + | **type**: str + | **sample**: dsedhelper -d -en IBM-1047 /^PATH=/a\\PATH=/dir/bin:$PATH/$ /etc/profile + +msg + The module messages + + | **returned**: failure + | **type**: str + | **sample**: Parameter verification failed + +return_content + The error messages from ZOAU dsed + + | **returned**: failure + | **type**: str + | **sample**: BGYSC1311E Iconv error, cannot open converter from ISO-88955-1 to IBM-1047 + +backup_name + Name of the backup file or data set that was created. + + | **returned**: if backup=true + | **type**: str + | **sample**: /path/to/file.txt.2015-02-03@04:15~ + diff --git a/docs/source/modules/zos_mount.rst b/docs/source/modules/zos_mount.rst new file mode 100644 index 0000000000..703795c3da --- /dev/null +++ b/docs/source/modules/zos_mount.rst @@ -0,0 +1,628 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_mount.py + +.. _zos_mount_module: + + +zos_mount -- Mount a z/OS file system. +====================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The module `zos_mount <./zos_mount.html>`_ can manage mount operations for a z/OS UNIX System Services (USS) file system data set. +- The *src* data set must be unique and a Fully Qualified Name (FQN). +- The *path* will be created if needed. + + + + + +Parameters +---------- + + +path + The absolute path name onto which the file system is to be mounted. + + The *path* is case sensitive and must be less than or equal 1023 characters long. + + | **required**: True + | **type**: str + + +src + The name of the file system to be added to the file system hierarchy. + + The file system *src* must be a data set of type *fs_type*. + + The file system *src* data set must be cataloged. + + | **required**: True + | **type**: str + + +fs_type + The type of file system that will be mounted. + + The physical file systems data set format to perform the logical mount. + + The *fs_type* is required to be lowercase. + + | **required**: True + | **type**: str + | **choices**: hfs, zfs, nfs, tfs + + +state + The desired status of the described mount (choice). + + If *state=mounted* and *src* are not in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will be updated, the device will be mounted and the module will complete successfully with *changed=True*. + + + If *state=mounted* and *src* are in use, the module will add the file system entry to the parmlib member *persistent/data_store* if not present. The *path* will not be updated, the device will not be mounted and the module will complete successfully with *changed=False*. + + + If *state=unmounted* and *src* are in use, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will be unmounted and the module will complete successfully with *changed=True*. 
+
+
+  If *state=unmounted* and *src* are not in use, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will remain unchanged and the module will complete with *changed=False*.
+
+
+  If *state=present*, the module will add the file system entry to the provided parmlib member *persistent/data_store* if not present. The module will complete successfully with *changed=True*.
+
+
+  If *state=absent*, the module will remove the file system entry from the provided parmlib member *persistent/data_store* if present. The module will complete successfully with *changed=True*.
+
+
+  If *state=remounted*, the module will **not** add the file system entry to the parmlib member *persistent/data_store*. The device will be unmounted and mounted, and the module will complete successfully with *changed=True*.
+
+
+  | **required**: False
+  | **type**: str
+  | **default**: mounted
+  | **choices**: absent, mounted, unmounted, present, remounted
+
+
+persistent
+  Add or remove mount command entries in the provided *data_store*.
+
+  | **required**: False
+  | **type**: dict
+
+
+  data_store
+    The data set name used for persisting a mount command. This is usually BPXPRMxx or a copy.
+
+    | **required**: True
+    | **type**: str
+
+
+  backup
+    Creates a backup file or backup data set for *data_store*, including the timestamp information to ensure that you retrieve the original parameters defined in *data_store*.
+
+    *backup_name* can be used to specify a backup file name if *backup=true*.
+
+    The backup file name will be returned on either success or failure of module execution such that data can be retrieved.
+
+    | **required**: False
+    | **type**: bool
+    | **default**: False
+
+
+  backup_name
+    Specify the USS file name or data set name for the destination backup.
+
+    If the source *data_store* is a USS file or path, the *backup_name* can be a relative or absolute file or path name.
+
+    If the source is an MVS data set, the backup_name must be an MVS data set name.
+
+    If the backup_name is not provided, the default *backup_name* will be used. If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp. For example, ``/path/file_name.2020-04-23-08-32-29-bak.tar``.
+
+    If the source is an MVS data set, it will be a data set with a random name generated by calling the ZOAU API. The MVS backup data set can be recovered by renaming it.
+
+    | **required**: False
+    | **type**: str
+
+
+  comment
+    If provided, this is used as a comment that surrounds the command in the *persistent/data_store*.
+
+    Comments are used to encapsulate the *persistent/data_store* entry such that it can easily be understood and located.
+
+    | **required**: False
+    | **type**: list
+    | **elements**: str
+
+
+
+unmount_opts
+  Describes how the unmount will be performed.
+
+  For more on unmount processing, review the IBM documentation topic **UNMOUNT - Remove a file system from the file hierarchy**.
+
+  | **required**: False
+  | **type**: str
+  | **default**: normal
+  | **choices**: drain, force, immediate, normal, remount, reset
+
+
+mount_opts
+  Options available to the mount.
+
+  If *mount_opts=ro* on a mount or remount, the mount is performed read-only.
+
+  If *mount_opts=same* and *unmount_opts=remount*, the file system is remounted in the same mode as it was previously mounted.
+
+  If *mount_opts=nowait*, the mount is performed asynchronously.
+
+  If *mount_opts=nosecurity*, security checks are not enforced for files in this file system.
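+
+  For instance, a brief sketch of the *mount_opts=same* pairing described above:
+
+  .. code-block:: yaml+jinja
+
+     - name: Remount a filesystem in the same mode it was mounted with
+       zos_mount:
+         src: SOMEUSER.VVV.ZFS
+         path: /u/omvsadm/core
+         fs_type: zfs
+         state: unmounted
+         unmount_opts: remount
+         mount_opts: same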
+ + | **required**: False + | **type**: str + | **default**: rw + | **choices**: ro, rw, same, nowait, nosecurity + + +src_params + Specifies a parameter string to be passed to the file system type. + + The parameter format and content are specified by the file system type. + + | **required**: False + | **type**: str + + +tag_untagged + If present, tags get written to any untagged file. + + When the file system is unmounted, the tags are lost. + + If *tag_untagged=notext* none of the untagged files in the file system are automatically converted during file reading and writing. + + If *tag_untagged=text* each untagged file is implicitly marked as containing pure text data that can be converted. + + If this flag is used, use of tag_ccsid is encouraged. + + | **required**: False + | **type**: str + | **choices**: text, notext + + +tag_ccsid + Identifies the coded character set identifier (ccsid) to be implicitly set for the untagged file. + + For more on coded character set identifiers, review the IBM documentation topic **Coded Character Sets**. + + Specified as a decimal value from 0 to 65535. However, when TEXT is specified, the value must be between 0 and 65535. + + The value is not checked as being valid and the corresponding code page is not checked as being installed. + + Required when *tag_untagged=TEXT*. + + | **required**: False + | **type**: int + + +allow_uid + Specifies whether the SETUID and SETGID mode bits on an executable in this file system are considered. Also determines whether the APF extended attribute or the Program Control extended attribute is honored. + + + If *allow_uid=True* the SETUID and SETGID mode bits are considered when a program in this file system is run. SETUID is the default. + + + If *allow_uid=False* the SETUID and SETGID mode bits are ignored when a program in this file system is run. The program runs as though the SETUID and SETGID mode bits were not set. Also, if you specify the NOSETUID option on MOUNT, the APF extended attribute and the Program Control Bit values are ignored. + + + | **required**: False + | **type**: bool + | **default**: True + + +sysname + For systems participating in shared file system, *sysname* specifies the particular system on which a mount should be performed. This system will then become the owner of the file system mounted. This system must be IPLed with SYSPLEX(YES). + + + *sysname* is the name of a system participating in shared file system. The name must be 1-8 characters long; the valid characters are A-Z, 0-9, $, @, and #. + + + | **required**: False + | **type**: str + + +automove + These parameters apply only in a sysplex where systems are exploiting the shared file system capability. They specify what happens to the ownership of a file system when a shutdown, PFS termination, dead system takeover, or file system move occurs. The default setting is AUTOMOVE where the file system will be randomly moved to another system (no system list used). + + + *automove=automove* indicates that ownership of the file system can be automatically moved to another system participating in a shared file system. + + + *automove=noautomove* prevents movement of the file system's ownership in some situations. + + + *automove=unmount* allows the file system to be unmounted in some situations. + + + | **required**: False + | **type**: str + | **default**: automove + | **choices**: automove, noautomove, unmount + + +automove_list + If(automove=automove), this option will be checked. 
+ + + This specifies the list of servers to include or exclude as destinations. + + + None is a valid value, meaning 'move anywhere'. + + + Indicator is either INCLUDE or EXCLUDE, which can also be abbreviated as I or E. + + + | **required**: False + | **type**: str + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Mount a filesystem. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + + - name: Unmount a filesystem. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: unmounted + unmount_opts: remount + mount_opts: same + + - name: Mount a filesystem readonly. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + mount_opts: ro + + - name: Mount a filesystem and record change in BPXPRMAA. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + persistent: + data_store: SYS1.PARMLIB(BPXPRMAA) + comment: For Tape2 project + + - name: Mount a filesystem and record change in BPXPRMAA after backing up to BPXPRMAB. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + persistent: + data_store: SYS1.PARMLIB(BPXPRMAA) + backup: true + backup_name: SYS1.PARMLIB(BPXPRMAB) + comment: For Tape2 project + + - name: Mount a filesystem ignoring uid/gid values. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + allow_uid: false + + - name: Mount a filesystem asynchronously (don't wait for completion). + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + mount_opts: nowait + + - name: Mount a filesystem with no security checks. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + mount_opts: nosecurity + + - name: Mount a filesystem, limiting automove to 4 devices. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + automove: automove + automove_list: I,DEV1,DEV2,DEV3,DEV9 + + - name: Mount a filesystem, limiting automove to all except 4 devices. + zos_mount: + src: SOMEUSER.VVV.ZFS + path: /u/omvsadm/core + fs_type: zfs + state: mounted + automove: automove + automove_list: EXCLUDE,DEV4,DEV5,DEV6,DEV7 + + + + +Notes +----- + +.. note:: + All data sets are always assumed to be cataloged. + + If an uncataloged data set needs to be fetched, it should be cataloged first. + + Uncataloged data sets can be cataloged using the `zos_data_set <./zos_data_set.html>`_ module. + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_data_set_module` + + + + +Return Values +------------- + + +path + The absolute path name onto which the file system is to be mounted. 
+ + | **returned**: always + | **type**: str + | **sample**: /u/omvsadm/core + +src + The file in z/OS that is to be mounted. + + | **returned**: always + | **type**: str + | **sample**: SOMEUSER.VVV.ZFS + +fs_type + The type of file system that will perform the logical mount request. + + | **returned**: always + | **type**: str + | **sample**: ZFS + +state + The desired status of the described mount. + + | **returned**: always + | **type**: str + | **sample**: mounted + +persistent + Values the user provided as input. + + | **returned**: always + | **type**: dict + + data_store + The persistent store name where the mount was written to. + + | **returned**: always + | **type**: str + | **sample**: SYS1.FILESYS(BPXPRMAA) + + backup + Indicates if a backup of destinattion was configured. + + | **returned**: always + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + backup_name + The unique data set name for the destination backup. + + | **returned**: always + | **type**: str + | **sample**: SYS1.FILESYS(PRMAABAK) + + comment + The text that was used in markers around the *Persistent/data_store* entry. + + | **returned**: always + | **type**: list + | **sample**: + + .. code-block:: json + + [ + [ + "u\u0027I did this because..\u0027" + ] + ] + + +unmount_opts + Describes how the unmount is to be performed. + + | **returned**: changed and if state=unmounted + | **type**: str + | **sample**: drain + +mount_opts + Options available to the mount. + + | **returned**: whenever non-None + | **type**: str + | **sample**: rw,nosecurity + +src_params + Specifies a parameter string to be passed to the file system type. + + | **returned**: whenever non-None + | **type**: str + | **sample**: D(101) + +tag_untagged + Indicates if tags should be written to untagged files. + + | **returned**: whenever Non-None + | **type**: str + | **sample**: TEXT + +tag_ccsid + CCSID for untagged files in the mounted file system. + + | **returned**: when tag_untagged is defined + | **type**: int + | **sample**: 819 + +allow_uid + Whether the SETUID and SETGID mode bits on executables in this file system are considered. + + | **returned**: always + | **type**: bool + | **sample**: + + .. code-block:: json + + true + +sysname + *sysname* specifies the particular system on which a mount should be performed. + + | **returned**: if Non-None + | **type**: str + | **sample**: MVSSYS01 + +automove + Specifies what happens to the ownership of a file system during a shutdown, PFS termination, dead system takeover, or when file system move occurs. + + + | **returned**: if Non-None + | **type**: str + | **sample**: automove + +automove_list + This specifies the list of servers to include or exclude as destinations. + + | **returned**: if Non-None + | **type**: str + | **sample**: I,SERV01,SERV02,SERV03,SERV04 + +msg + Failure message returned by the module. + + | **returned**: failure + | **type**: str + | **sample**: Error while gathering information + +stdout + The stdout from the mount command. + + | **returned**: always + | **type**: str + | **sample**: MOUNT FILESYSTEM( 'source-dataset' ) MOUNTPOINT( '/uss-path' ) TYPE( ZFS ) + +stderr + The stderr from the mount command. + + | **returned**: failure + | **type**: str + | **sample**: No such file or directory "/tmp/foo" + +stdout_lines + List of strings containing individual lines from stdout. + + | **returned**: failure + | **type**: list + | **sample**: + + .. 
code-block:: json + + [ + "u\"MOUNT FILESYSTEM( \u0027source-dataset\u0027 ) MOUNTPOINT( \u0027/uss-path\u0027 ) TYPE( ZFS )\"" + ] + +stderr_lines + List of strings containing individual lines from stderr. + + | **returned**: failure + | **type**: list + | **sample**: + + .. code-block:: json + + [ + { + "u\"FileNotFoundError": "No such file or directory \u0027/tmp/foo\u0027\"" + } + ] + +cmd + The actual command that was run by the module. + + | **returned**: failure + | **type**: str + | **sample**: MOUNT FILESYSTEM( 'EXAMPLE.DATA.SET' ) MOUNTPOINT( '/u/omvsadm/sample' ) TYPE( ZFS ) + +rc + The return code of the mount command, if applicable. + + | **returned**: failure + | **type**: int + | **sample**: 8 + diff --git a/docs/source/modules/zos_mvs_raw.rst b/docs/source/modules/zos_mvs_raw.rst new file mode 100644 index 0000000000..4d0abec6f2 --- /dev/null +++ b/docs/source/modules/zos_mvs_raw.rst @@ -0,0 +1,2083 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_mvs_raw.py + +.. _zos_mvs_raw_module: + + +zos_mvs_raw -- Run a z/OS program. +================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Run a z/OS program. +- This is analogous to a job step in JCL. +- Defaults will be determined by underlying API if value not provided. + + + + + +Parameters +---------- + + +program_name + The name of the z/OS program to run (e.g. IDCAMS, IEFBR14, IEBGENER etc.). + + | **required**: True + | **type**: str + + +parm + The program arguments (e.g. -a='MARGINS(1,72)'). + + | **required**: False + | **type**: str + + +auth + Determines whether this program should run with authorized privileges. + + If *auth=true*, the program runs as APF authorized. + + If *auth=false*, the program runs as unauthorized. + + | **required**: False + | **type**: bool + | **default**: False + + +verbose + Determines if verbose output should be returned from the underlying utility used by this module. + + When *verbose=true* verbose output is returned on module failure. + + | **required**: False + | **type**: bool + | **default**: False + + +max_rc + Specifies the maximum return code allowed for the program output. If the program generates a return code higher than the specified maximum, the module will fail. + + | **required**: False + | **type**: int + | **default**: 0 + + +dds + The input data source. + + *dds* supports 6 types of sources + + 1. *dd_data_set* for data set files. + + 2. *dd_unix* for UNIX files. + + 3. *dd_input* for in-stream data set. + + 4. *dd_dummy* for no content input. + + 5. *dd_concat* for a data set concatenation. + + 6. *dds* supports any combination of source types. + + | **required**: False + | **type**: list + | **elements**: dict + + + dd_data_set + Specify a data set. + + *dd_data_set* can reference an existing data set or be used to define a new data set to be created during execution. + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + data_set_name + The data set name. + + A data set name can be a GDS relative name. + + When using GDS relative name and it is a positive generation, *disposition=new* must be used. + + | **required**: False + | **type**: str + + + raw + Create a new data set and let the MVS program assign its own default DCB attributes. + + When ``raw=true``, all supplied DCB attributes like disposition, space, volumes, SMS, keys, record settings, etc. are ignored. 
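+
+      As a hedged sketch (the program invocation and data set names are illustrative), ``raw=true`` might be combined with an in-stream ``sysin`` as follows:
+
+      .. code-block:: yaml+jinja
+
+         - name: Run IDCAMS and let it assign the SYSPRINT attributes
+           zos_mvs_raw:
+             program_name: idcams
+             auth: true
+             dds:
+               - dd_data_set:
+                   dd_name: sysprint
+                   data_set_name: MYHLQ.RAW.SYSPRINT
+                   raw: true
+               - dd_input:
+                   dd_name: sysin
+                   content: " LISTCAT ENTRIES(MYHLQ.SOME.DATA)"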
+ + Using ``raw`` option is not possible for all programs, use this for cases where the MVS program that is called is able to assign its own default dataset attributes. + + | **required**: False + | **type**: bool + | **default**: False + + + type + The data set type. Only required when *disposition=new*. + + Maps to DSNTYPE on z/OS. + + | **required**: False + | **type**: str + | **choices**: library, pds, pdse, large, basic, seq, rrds, esds, lds, ksds + + + disposition + *disposition* indicates the status of a data set. + + Defaults to shr. + + | **required**: False + | **type**: str + | **choices**: new, shr, mod, old + + + disposition_normal + *disposition_normal* indicates what to do with the data set after a normal termination of the program. + + | **required**: False + | **type**: str + | **choices**: delete, keep, catalog, uncatalog + + + disposition_abnormal + *disposition_abnormal* indicates what to do with the data set after an abnormal termination of the program. + + | **required**: False + | **type**: str + | **choices**: delete, keep, catalog, uncatalog + + + reuse + Determines if a data set should be reused if *disposition=new* and if a data set with a matching name already exists. + + If *reuse=true*, *disposition* will be automatically switched to ``SHR``. + + If *reuse=false*, and a data set with a matching name already exists, allocation will fail. + + Mutually exclusive with *replace*. + + *reuse* is only considered when *disposition=new* + + | **required**: False + | **type**: bool + | **default**: False + + + replace + Determines if a data set should be replaced if *disposition=new* and a data set with a matching name already exists. + + If *replace=true*, the original data set will be deleted, and a new data set created. + + If *replace=false*, and a data set with a matching name already exists, allocation will fail. + + Mutually exclusive with *reuse*. + + *replace* is only considered when *disposition=new* + + *replace* will result in loss of all data in the original data set unless *backup* is specified. + + | **required**: False + | **type**: bool + | **default**: False + + + backup + Determines if a backup should be made of an existing data set when *disposition=new*, *replace=true*, and a data set with the desired name is found. + + *backup* is only used when *replace=true*. + + | **required**: False + | **type**: bool + | **default**: False + + + space_type + The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. + + | **required**: False + | **type**: str + | **choices**: trk, cyl, b, k, m, g + + + space_primary + The primary amount of space to allocate for a new data set. + + The value provided to *space_type* is used as the unit of space for the allocation. + + Not applicable when *space_type=blklgth* or *space_type=reclgth*. + + | **required**: False + | **type**: int + + + space_secondary + When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. + + The value provided to *space_type* is used as the unit of space for the allocation. + + Not applicable when *space_type=blklgth* or *space_type=reclgth*. + + | **required**: False + | **type**: int + + + volumes + The volume or volumes on which a data set resides or will reside. + + Do not specify the same volume multiple times. + + | **required**: False + | **type**: raw + + + sms_management_class + The desired management class for a new SMS-managed data set. 
+ + *sms_management_class* is ignored if specified for an existing data set. + + All values must be between 1-8 alpha-numeric characters. + + | **required**: False + | **type**: str + + + sms_storage_class + The desired storage class for a new SMS-managed data set. + + *sms_storage_class* is ignored if specified for an existing data set. + + All values must be between 1-8 alpha-numeric characters. + + | **required**: False + | **type**: str + + + sms_data_class + The desired data class for a new SMS-managed data set. + + *sms_data_class* is ignored if specified for an existing data set. + + All values must be between 1-8 alpha-numeric characters. + + | **required**: False + | **type**: str + + + block_size + The maximum length of a block in bytes. + + Default is dependent on *record_format* + + | **required**: False + | **type**: int + + + directory_blocks + The number of directory blocks to allocate to the data set. + + | **required**: False + | **type**: int + + + key_label + The label for the encryption key used by the system to encrypt the data set. + + *key_label* is the public name of a protected encryption key in the ICSF key repository. + + *key_label* should only be provided when creating an extended format data set. + + Maps to DSKEYLBL on z/OS. + + | **required**: False + | **type**: str + + + encryption_key_1 + The encrypting key used by the Encryption Key Manager. + + Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. + + | **required**: False + | **type**: dict + + + label + The label for the key encrypting key used by the Encryption Key Manager. + + Key label must have a private key associated with it. + + *label* can be a maximum of 64 characters. + + Maps to KEYLAB1 on z/OS. + + | **required**: True + | **type**: str + + + encoding + How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + + *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. + + Maps to KEYCD1 on z/OS. + + | **required**: True + | **type**: str + | **choices**: l, h + + + + encryption_key_2 + The encrypting key used by the Encryption Key Manager. + + Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. + + | **required**: False + | **type**: dict + + + label + The label for the key encrypting key used by the Encryption Key Manager. + + Key label must have a private key associated with it. + + *label* can be a maximum of 64 characters. + + Maps to KEYLAB2 on z/OS. + + | **required**: True + | **type**: str + + + encoding + How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + + *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. + + Maps to KEYCD2 on z/OS. + + | **required**: True + | **type**: str + | **choices**: l, h + + + + key_length + The length of the keys used in a new data set. + + If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. + + Valid values are (0-255 non-vsam), (1-255 vsam). + + | **required**: False + | **type**: int + + + key_offset + The position of the first byte of the record key in each logical record of a new VSAM data set. + + The first byte of a logical record is position 0. + + Provide *key_offset* only for VSAM key-sequenced data sets. 
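+
+      A hedged sketch of describing a new KSDS with an 8-byte key at the start of each record (names are invented):
+
+      .. code-block:: yaml+jinja
+
+         - name: Allocate a new KSDS for program output
+           zos_mvs_raw:
+             program_name: idcams
+             auth: true
+             dds:
+               - dd_data_set:
+                   dd_name: output
+                   data_set_name: MYHLQ.NEW.KSDS
+                   disposition: new
+                   type: ksds
+                   key_length: 8
+                   key_offset: 0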
+ + | **required**: False + | **type**: int + + + record_length + The logical record length. (e.g ``80``). + + For variable data sets, the length must include the 4-byte prefix area. + + Defaults vary depending on format: If FB/FBA 80, if VB/VBA 137, if U 0. + + Valid values are (1-32760 for non-VSAM, 1-32761 for VSAM). + + Maps to LRECL on z/OS. + + | **required**: False + | **type**: int + + + record_format + The format and characteristics of the records for new data set. + + | **required**: False + | **type**: str + | **choices**: u, vb, vba, fb, fba + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. + + | **required**: False + | **type**: dict + + + type + The type of the content to be returned. + + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. + + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the data set on the z/OS system. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the data set. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + dd_unix + The path to a file in UNIX System Services (USS). + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + path + The path to an existing UNIX file. + + Or provide the path to an new created UNIX file when *status_group=OCREAT*. + + The provided path must be absolute. + + | **required**: True + | **type**: str + + + disposition_normal + Indicates what to do with the UNIX file after normal termination of the program. + + | **required**: False + | **type**: str + | **choices**: keep, delete + + + disposition_abnormal + Indicates what to do with the UNIX file after abnormal termination of the program. + + | **required**: False + | **type**: str + | **choices**: keep, delete + + + mode + The file access attributes when the UNIX file is created specified in *path*. + + Specify the mode as an octal number similarly to chmod. + + Maps to PATHMODE on z/OS. + + | **required**: False + | **type**: int + + + status_group + The status for the UNIX file specified in *path*. + + If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails the module if the pathname does not exist. + + Maps to PATHOPTS status group file options on z/OS. + + You can specify up to 6 choices. + + *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. + + *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, a new directory and a new file are not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. + + *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. 
+ + *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process. + + *ononblock* specifies the following, depending on the type of file + + For a FIFO special file + + 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. + + 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. + + 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. + + 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. + + 5. For a character special file that supports nonblocking open + + 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + + 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. + + *ononblock* has no effect on other file types. + + *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + + *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. + + When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. + + | **required**: False + | **type**: list + | **elements**: str + | **choices**: oappend, ocreat, oexcl, onoctty, ononblock, osync, otrunc + + + access_group + The kind of access to request for the UNIX file specified in *path*. + + | **required**: False + | **type**: str + | **choices**: r, w, rw, read_only, write_only, read_write, ordonly, owronly, ordwr + + + file_data_type + The type of data that is (or will be) stored in the file specified in *path*. + + Maps to FILEDATA on z/OS. + + | **required**: False + | **type**: str + | **default**: binary + | **choices**: binary, text, record + + + block_size + The block size, in bytes, for the UNIX file. + + Default is dependent on *record_format* + + | **required**: False + | **type**: int + + + record_length + The logical record length for the UNIX file. + + *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + + Maps to LRECL on z/OS. + + | **required**: False + | **type**: int + + + record_format + The record format for the UNIX file. + + *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + + | **required**: False + | **type**: str + | **choices**: u, vb, vba, fb, fba + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. + + | **required**: False + | **type**: dict + + + type + The type of the content to be returned. 
+ + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. + + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the file on the z/OS system. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the file. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + dd_input + *dd_input* is used to specify an in-stream data set. + + Input will be saved to a temporary data set with a record length of 80. + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + content + The input contents for the DD. + + *dd_input* supports single or multiple lines of input. + + Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. + + If a list of strings is provided, newlines will be added to each of the lines when used as input. + + If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. + + When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all *content* types; string, list of strings and when using a YAML block indicator. + + | **required**: True + | **type**: raw + + + reserved_cols + Determines how many columns at the beginning of the content are reserved with empty spaces. + + | **required**: False + | **type**: int + | **default**: 2 + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. + + | **required**: False + | **type**: dict + + + type + The type of the content to be returned. + + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. + + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the data set on the z/OS system. + + for *dd_input*, *src_encoding* should generally not need to be changed. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the data set. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + dd_output + Use *dd_output* to specify - Content sent to the DD should be returned to the user. + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. 
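+
+      For example, a brief sketch that returns whatever the program writes to a DD (the program and names are illustrative):
+
+      .. code-block:: yaml+jinja
+
+         - name: Capture output written to SYSPRINT
+           zos_mvs_raw:
+             program_name: idcams
+             auth: true
+             dds:
+               - dd_output:
+                   dd_name: sysprint
+                   return_content:
+                     type: text
+               - dd_input:
+                   dd_name: sysin
+                   content: " LISTCAT ENTRIES(MYHLQ.SOME.DATA)"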
+ + | **required**: True + | **type**: dict + + + type + The type of the content to be returned. + + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. + + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the data set on the z/OS system. + + for *dd_input*, *src_encoding* should generally not need to be changed. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the data set. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + dd_dummy + Use *dd_dummy* to specify - No device or external storage space is to be allocated to the data set. - No disposition processing is to be performed on the data set. + + *dd_dummy* accepts no content input. + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + + dd_vio + *dd_vio* is used to handle temporary data sets. + + VIO data sets reside in the paging space; but, to the problem program and the access method, the data sets appear to reside on a direct access storage device. + + You cannot use VIO for permanent data sets, VSAM data sets, or partitioned data sets extended (PDSEs). + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + + dd_volume + Use *dd_volume* to specify the volume to use in the DD statement. + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + volume_name + The volume serial number. + + | **required**: True + | **type**: str + + + unit + Device type for the volume. + + This option is case sensitive. + + | **required**: True + | **type**: str + + + disposition + *disposition* indicates the status of a data set. + + | **required**: True + | **type**: str + | **choices**: new, shr, mod, old + + + + dd_concat + *dd_concat* is used to specify a data set concatenation. + + | **required**: False + | **type**: dict + + + dd_name + The DD name. + + | **required**: True + | **type**: str + + + dds + A list of DD statements, which can contain any of the following types: *dd_data_set*, *dd_unix*, and *dd_input*. + + | **required**: False + | **type**: list + | **elements**: dict + + + dd_data_set + Specify a data set. + + *dd_data_set* can reference an existing data set. The data set referenced with ``data_set_name`` must be allocated before the module `zos_mvs_raw <./zos_mvs_raw.html>`_ is run, you can use `zos_data_set <./zos_data_set.html>`_ to allocate a data set. + + | **required**: False + | **type**: dict + + + data_set_name + The data set name. + + A data set name can be a GDS relative name. + + When using GDS relative name and it is a positive generation, *disposition=new* must be used. + + | **required**: False + | **type**: str + + + raw + Create a new data set and let the MVS program assign its own default DCB attributes. + + When ``raw=true``, all supplied DCB attributes like disposition, space, volumes, SMS, keys, record settings, etc. are ignored. + + Using ``raw`` option is not possible for all programs, use this for cases where the MVS program that is called is able to assign its own default dataset attributes. 
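+
+          As a broader, abridged sketch of a concatenation using these entries (data set names are invented; SYSPRINT is omitted for brevity):
+
+          .. code-block:: yaml+jinja
+
+             - name: Copy a concatenation of two existing data sets
+               zos_mvs_raw:
+                 program_name: iebgener
+                 dds:
+                   - dd_concat:
+                       dd_name: sysut1
+                       dds:
+                         - dd_data_set:
+                             data_set_name: MYHLQ.INPUT.ONE
+                         - dd_data_set:
+                             data_set_name: MYHLQ.INPUT.TWO
+                   - dd_data_set:
+                       dd_name: sysut2
+                       data_set_name: MYHLQ.OUTPUT.DATA
+                   - dd_dummy:
+                       dd_name: sysin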
+ + | **required**: False + | **type**: bool + | **default**: False + + + type + The data set type. Only required when *disposition=new*. + + Maps to DSNTYPE on z/OS. + + | **required**: False + | **type**: str + | **choices**: library, pds, pdse, large, basic, seq, rrds, esds, lds, ksds + + + disposition + *disposition* indicates the status of a data set. + + Defaults to shr. + + | **required**: False + | **type**: str + | **choices**: new, shr, mod, old + + + disposition_normal + *disposition_normal* indicates what to do with the data set after normal termination of the program. + + | **required**: False + | **type**: str + | **choices**: delete, keep, catalog, uncatalog + + + disposition_abnormal + *disposition_abnormal* indicates what to do with the data set after abnormal termination of the program. + + | **required**: False + | **type**: str + | **choices**: delete, keep, catalog, uncatalog + + + reuse + Determines if data set should be reused if *disposition=new* and a data set with matching name already exists. + + If *reuse=true*, *disposition* will be automatically switched to ``SHR``. + + If *reuse=false*, and a data set with a matching name already exists, allocation will fail. + + Mutually exclusive with *replace*. + + *reuse* is only considered when *disposition=new* + + | **required**: False + | **type**: bool + | **default**: False + + + replace + Determines if data set should be replaced if *disposition=new* and a data set with matching name already exists. + + If *replace=true*, the original data set will be deleted, and a new data set created. + + If *replace=false*, and a data set with a matching name already exists, allocation will fail. + + Mutually exclusive with *reuse*. + + *replace* is only considered when *disposition=new* + + *replace* will result in loss of all data in the original data set unless *backup* is specified. + + | **required**: False + | **type**: bool + | **default**: False + + + backup + Determines if a backup should be made of existing data set when *disposition=new*, *replace=true*, and a data set with the desired name is found. + + *backup* is only used when *replace=true*. + + | **required**: False + | **type**: bool + | **default**: False + + + space_type + The unit of measurement to use when allocating space for a new data set using *space_primary* and *space_secondary*. + + | **required**: False + | **type**: str + | **choices**: trk, cyl, b, k, m, g + + + space_primary + The primary amount of space to allocate for a new data set. + + The value provided to *space_type* is used as the unit of space for the allocation. + + Not applicable when *space_type=blklgth* or *space_type=reclgth*. + + | **required**: False + | **type**: int + + + space_secondary + When primary allocation of space is filled, secondary space will be allocated with the provided size as needed. + + The value provided to *space_type* is used as the unit of space for the allocation. + + Not applicable when *space_type=blklgth* or *space_type=reclgth*. + + | **required**: False + | **type**: int + + + volumes + The volume or volumes on which a data set resides or will reside. + + Do not specify the same volume multiple times. + + | **required**: False + | **type**: raw + + + sms_management_class + The desired management class for a new SMS-managed data set. + + *sms_management_class* is ignored if specified for an existing data set. + + All values must be between 1-8 alpha-numeric characters. 
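+
+          A hedged sketch of supplying SMS classes for a newly created data set (class names are site-specific and invented; the same options apply to the top-level *dd_data_set*):
+
+          .. code-block:: yaml+jinja
+
+             - name: Create an SMS-managed output data set
+               zos_mvs_raw:
+                 program_name: iebgener
+                 dds:
+                   - dd_data_set:
+                       dd_name: sysut2
+                       data_set_name: MYHLQ.NEW.DATA
+                       disposition: new
+                       sms_storage_class: STDSC
+                       sms_data_class: STDDC
+                       sms_management_class: VSAMMC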
+ + | **required**: False + | **type**: str + + + sms_storage_class + The desired storage class for a new SMS-managed data set. + + *sms_storage_class* is ignored if specified for an existing data set. + + All values must be between 1-8 alpha-numeric characters. + + | **required**: False + | **type**: str + + + sms_data_class + The desired data class for a new SMS-managed data set. + + *sms_data_class* is ignored if specified for an existing data set. + + All values must be between 1-8 alpha-numeric characters. + + | **required**: False + | **type**: str + + + block_size + The maximum length of a block in bytes. + + Default is dependent on *record_format* + + | **required**: False + | **type**: int + + + directory_blocks + The number of directory blocks to allocate to the data set. + + | **required**: False + | **type**: int + + + key_label + The label for the encryption key used by the system to encrypt the data set. + + *key_label* is the public name of a protected encryption key in the ICSF key repository. + + *key_label* should only be provided when creating an extended format data set. + + Maps to DSKEYLBL on z/OS. + + | **required**: False + | **type**: str + + + encryption_key_1 + The encrypting key used by the Encryption Key Manager. + + Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. + + | **required**: False + | **type**: dict + + + label + The label for the key encrypting key used by the Encryption Key Manager. + + Key label must have a private key associated with it. + + *label* can be a maximum of 64 characters. + + Maps to KEYLAB1 on z/OS. + + | **required**: True + | **type**: str + + + encoding + How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + + *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. + + Maps to KEYCD1 on z/OS. + + | **required**: True + | **type**: str + | **choices**: l, h + + + + encryption_key_2 + The encrypting key used by the Encryption Key Manager. + + Specification of the key labels does not by itself enable encryption. Encryption must be enabled by a data class that specifies an encryption format. + + | **required**: False + | **type**: dict + + + label + The label for the key encrypting key used by the Encryption Key Manager. + + Key label must have a private key associated with it. + + *label* can be a maximum of 64 characters. + + Maps to KEYLAB2 on z/OS. + + | **required**: True + | **type**: str + + + encoding + How the label for the key encrypting key specified by *label* is encoded by the Encryption Key Manager. + + *encoding* can either be set to ``l`` for label encoding, or ``h`` for hash encoding. + + Maps to KEYCD2 on z/OS. + + | **required**: True + | **type**: str + | **choices**: l, h + + + + key_length + The length of the keys used in a new data set. + + If using SMS, setting *key_length* overrides the key length defined in the SMS data class of the data set. + + Valid values are (0-255 non-vsam), (1-255 vsam). + + | **required**: False + | **type**: int + + + key_offset + The position of the first byte of the record key in each logical record of a new VSAM data set. + + The first byte of a logical record is position 0. + + Provide *key_offset* only for VSAM key-sequenced data sets. + + | **required**: False + | **type**: int + + + record_length + The logical record length. (e.g ``80``). 
+ + For variable data sets, the length must include the 4-byte prefix area. + + Defaults vary depending on format: if FB/FBA, 80; if VB/VBA, 137; if U, 0. + + Valid values are (1-32760 for non-vsam, 1-32761 for vsam). + + Maps to LRECL on z/OS. + + | **required**: False + | **type**: int + + + record_format + The format and characteristics of the records for a new data set. + + | **required**: False + | **type**: str + | **choices**: u, vb, vba, fb, fba + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. + + | **required**: False + | **type**: dict + + + type + The type of the content to be returned. + + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. + + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the data set on the z/OS system. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the data set. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + dd_unix + The path to a file in UNIX System Services (USS). + + | **required**: False + | **type**: dict + + + path + The path to an existing UNIX file. + + Alternatively, provide the path to a new UNIX file to be created when *status_group=ocreat*. + + The provided path must be absolute. + + | **required**: True + | **type**: str + + + disposition_normal + Indicates what to do with the UNIX file after normal termination of the program. + + | **required**: False + | **type**: str + | **choices**: keep, delete + + + disposition_abnormal + Indicates what to do with the UNIX file after abnormal termination of the program. + + | **required**: False + | **type**: str + | **choices**: keep, delete + + + mode + The file access attributes to apply when creating the UNIX file specified in *path*. + + Specify the mode as an octal number similar to chmod. + + Maps to PATHMODE on z/OS. + + | **required**: False + | **type**: int + + + status_group + The status for the UNIX file specified in *path*. + + If you do not specify a value for the *status_group* parameter, the module assumes that the pathname exists, searches for it, and fails if the pathname does not exist. + + Maps to PATHOPTS status group file options on z/OS. + + You can specify up to 6 choices. + + *oappend* sets the file offset to the end of the file before each write, so that data is written at the end of the file. + + *ocreat* specifies that if the file does not exist, the system is to create it. If a directory specified in the pathname does not exist, one is not created, and the new file is not created. If the file already exists and *oexcl* was not specified, the system allows the program to use the existing file. If the file already exists and *oexcl* was specified, the system fails the allocation and the job step. + + *oexcl* specifies that if the file does not exist, the system is to create it. If the file already exists, the system fails the allocation and the job step. The system ignores *oexcl* if *ocreat* is not also specified. + + *onoctty* specifies that if the PATH parameter identifies a terminal device, opening of the file does not make the terminal device the controlling terminal for the process.
+ + *ononblock* specifies the following, depending on the type of file + + For a FIFO special file + + 1. With *ononblock* specified and *ordonly* access, an open function for reading-only returns without delay. + + 2. With *ononblock* not specified and *ordonly* access, an open function for reading-only blocks (waits) until a process opens the file for writing. + + 3. With *ononblock* specified and *owronly* access, an open function for writing-only returns an error if no process currently has the file open for reading. + + 4. With *ononblock* not specified and *owronly* access, an open function for writing-only blocks (waits) until a process opens the file for reading. + + 5. For a character special file that supports nonblocking open + + 6. If *ononblock* is specified, an open function returns without blocking (waiting) until the device is ready or available. Device response depends on the type of device. + + 7. If *ononblock* is not specified, an open function blocks (waits) until the device is ready or available. + + *ononblock* has no effect on other file types. + + *osync* specifies that the system is to move data from buffer storage to permanent storage before returning control from a callable service that performs a write. + + *otrunc* specifies that the system is to truncate the file length to zero if all the following are true: the file specified exists, the file is a regular file, and the file successfully opened with *ordwr* or *owronly*. + + When *otrunc* is specified, the system does not change the mode and owner. *otrunc* has no effect on FIFO special files or character special files. + + | **required**: False + | **type**: list + | **elements**: str + | **choices**: oappend, ocreat, oexcl, onoctty, ononblock, osync, otrunc + + + access_group + The kind of access to request for the UNIX file specified in *path*. + + | **required**: False + | **type**: str + | **choices**: r, w, rw, read_only, write_only, read_write, ordonly, owronly, ordwr + + + file_data_type + The type of data that is (or will be) stored in the file specified in *path*. + + Maps to FILEDATA on z/OS. + + | **required**: False + | **type**: str + | **default**: binary + | **choices**: binary, text, record + + + block_size + The block size, in bytes, for the UNIX file. + + Default is dependent on *record_format* + + | **required**: False + | **type**: int + + + record_length + The logical record length for the UNIX file. + + *record_length* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + + Maps to LRECL on z/OS. + + | **required**: False + | **type**: int + + + record_format + The record format for the UNIX file. + + *record_format* is required in situations where the data will be processed as records and therefore, *record_length*, *block_size* and *record_format* need to be supplied since a UNIX file would normally be treated as a stream of bytes. + + | **required**: False + | **type**: str + | **choices**: u, vb, vba, fb, fba + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. + + | **required**: False + | **type**: dict + + + type + The type of the content to be returned. + + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. 
+ + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the file on the z/OS system. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the file. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + dd_input + *dd_input* is used to specify an in-stream data set. + + Input will be saved to a temporary data set with a record length of 80. + + | **required**: False + | **type**: dict + + + content + The input contents for the DD. + + *dd_input* supports single or multiple lines of input. + + Multi-line input can be provided as a multi-line string or a list of strings with 1 line per list item. + + If a list of strings is provided, newlines will be added to each of the lines when used as input. + + If a multi-line string is provided, use the proper block scalar style. YAML supports both `literal `_ and `folded `_ scalars. It is recommended to use the literal style indicator "|" with a block indentation indicator, for example; *content: | 2* is a literal block style indicator with a 2 space indentation, the entire block will be indented and newlines preserved. The block indentation range is 1 - 9. While generally unnecessary, YAML does support block `chomping `_ indicators "+" and "-" as well. + + When using the *content* option for instream-data, the module will ensure that all lines contain a blank in columns 1 and 2 and add blanks when not present while retaining a maximum length of 80 columns for any line. This is true for all *content* types; string, list of strings and when using a YAML block indicator. + + | **required**: True + | **type**: raw + + + reserved_cols + Determines how many columns at the beginning of the content are reserved with empty spaces. + + | **required**: False + | **type**: int + | **default**: 2 + + + return_content + Determines how content should be returned to the user. + + If not provided, no content from the DD is returned. + + | **required**: False + | **type**: dict + + + type + The type of the content to be returned. + + ``text`` means return content in encoding specified by *response_encoding*. + + *src_encoding* and *response_encoding* are only used when *type=text*. + + ``base64`` means return content as base64 encoded in binary. + + | **required**: True + | **type**: str + | **choices**: text, base64 + + + src_encoding + The encoding of the data set on the z/OS system. + + for *dd_input*, *src_encoding* should generally not need to be changed. + + | **required**: False + | **type**: str + | **default**: ibm-1047 + + + response_encoding + The encoding to use when returning the contents of the data set. + + | **required**: False + | **type**: str + | **default**: iso8859-1 + + + + + + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup datasets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. 
+check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: List data sets matching pattern in catalog, + save output to a new sequential data set and return output as text. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: mypgm.output.ds + disposition: new + reuse: true + type: seq + space_primary: 5 + space_secondary: 1 + space_type: m + volumes: + - "000000" + record_format: fb + return_content: + type: text + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: Run ADRDSSU to dump a data set without having to specify the DCB attributes for dd_data_set by using the raw option. + zos_mvs_raw: + program_name: ADRDSSU + auth: true + verbose: true + dds: + - dd_data_set: + dd_name: OUTDD + data_set_name: "USER.TEST.DUMP" + raw: true + - dd_input: + dd_name: SYSIN + content: | + DUMP DATASET(INCLUDE(USER.TEST.SOURCE)) - + OUTDDNAME(OUTDD) + - dd_output: + dd_name: SYSPRINT + return_content: + type: text + + - name: Full volume dump using ADRDSSU. + zos_mvs_raw: + program_name: adrdssu + auth: true + dds: + - dd_data_set: + dd_name: dumpdd + data_set_name: mypgm.output.ds + disposition: new + disposition_normal: catalog + disposition_abnormal: delete + space_type: cyl + space_primary: 10 + space_secondary: 10 + record_format: u + record_length: 0 + block_size: 32760 + type: seq + - dd_volume: + dd_name: voldd + volume_name: "000000" + unit: "3390" + disposition: old + - dd_input: + dd_name: sysin + content: " VOLDUMP VOL(voldd) DSNAME(dumpdd) FULL" + - dd_output: + dd_name: sysprint + return_content: + type: text + + - name: List data sets matching patterns in catalog, + save output to a new sequential data set and return output as text. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: mypgm.output.ds + disposition: new + reuse: true + type: seq + space_primary: 5 + space_secondary: 1 + space_type: m + volumes: + - "000000" + record_format: fb + return_content: + type: text + - dd_input: + dd_name: sysin + content: + - LISTCAT ENTRIES('SOME.DATASET.*') + - LISTCAT ENTRIES('SOME.OTHER.DS.*') + - LISTCAT ENTRIES('YET.ANOTHER.DS.*') + + - name: List data sets matching pattern in catalog, + save output to an existing sequential data set and + return output as text. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: mypgm.output.ds + disposition: shr + return_content: + type: text + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: List data sets matching pattern in catalog, + save output to a sequential data set. If the data set exists, + then reuse it; if it does not exist, create it. Returns output as text. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: mypgm.output.ds + disposition: new + reuse: true + type: seq + space_primary: 5 + space_secondary: 1 + space_type: m + volumes: + - "000000" + record_format: fb + return_content: + type: text + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')"
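+ + # The following task is an illustrative sketch added to this documentation, not part of the original examples; the data set name MYHLQ.IEBGENER.OUT is a placeholder for any cataloged sequential data set with compatible attributes. + - name: Copy in-stream data to an existing sequential data set using IEBGENER. + zos_mvs_raw: + program_name: iebgener + auth: false + dds: + - dd_input: + dd_name: sysut1 + content: " SAMPLE RECORD" + - dd_data_set: + dd_name: sysut2 + data_set_name: myhlq.iebgener.out + disposition: shr + - dd_dummy: + dd_name: sysin + - dd_output: + dd_name: sysprint + return_content: + type: text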
- name: List data sets matching pattern in catalog, + save output to a sequential data set. If the data set exists, + then back up the existing data set and replace it. + If the data set does not exist, create it. + Returns backup name (if a backup was made) and output as text. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: mypgm.output.ds + disposition: new + replace: true + backup: true + type: seq + space_primary: 5 + space_secondary: 1 + space_type: m + volumes: + - "000000" + - "111111" + - "SCR002" + record_format: fb + return_content: + type: text + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: List data sets matching pattern in catalog, + save output to a file in UNIX System Services. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_unix: + dd_name: sysprint + path: /u/myuser/outputfile.txt + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: List data sets matching pattern in catalog, + save output to a file in UNIX System Services. + Return the contents of the file in encoding IBM-1047, + while the file is encoded in ISO8859-1. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_unix: + dd_name: sysprint + path: /u/myuser/outputfile.txt + return_content: + type: text + src_encoding: iso8859-1 + response_encoding: ibm-1047 + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: List data sets matching pattern in catalog, + return output to user, but don't store in persistent storage. + Return the contents of the file in encoding IBM-1047, + while the file is encoded in ISO8859-1. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_output: + dd_name: sysprint + return_content: + type: text + src_encoding: iso8859-1 + response_encoding: ibm-1047 + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: Take a set of data sets and write them to an archive. + zos_mvs_raw: + program_name: adrdssu + auth: true + dds: + - dd_data_set: + dd_name: archive + data_set_name: myhlq.stor.darv1 + disposition: old + - dd_data_set: + dd_name: sysin + data_set_name: myhlq.adrdssu.cmd + disposition: shr + - dd_dummy: + dd_name: sysprint + + - name: Merge two sequential data sets and write them to a new data set. + zos_mvs_raw: + program_name: sort + auth: false + parm: "MSGPRT=CRITICAL,LIST" + dds: + - dd_data_set: + dd_name: sortin01 + data_set_name: myhlq.dfsort.main + disposition: shr + - dd_data_set: + dd_name: sortin02 + data_set_name: myhlq.dfsort.new + - dd_input: + dd_name: sysin + content: " MERGE FORMAT=CH,FIELDS=(1,9,A)" + - dd_data_set: + dd_name: sortout + data_set_name: myhlq.dfsort.merge + type: seq + disposition: new + - dd_unix: + dd_name: sysout + path: /tmp/sortpgmoutput.txt + mode: 644 + status_group: + - ocreat + access_group: w + + - name: List data sets matching a pattern in catalog, + save output to a concatenation of data set members and + files. + zos_mvs_raw: + pgm: idcams + auth: true + dds: + - dd_concat: + dd_name: sysprint + dds: + - dd_data_set: + data_set_name: myhlq.ds1.out(out1) + - dd_data_set: + data_set_name: myhlq.ds1.out(out2) + - dd_data_set: + data_set_name: myhlq.ds1.out(out3) + - dd_unix: + path: /tmp/overflowout.txt + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SYS1.*')"
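+ + # The following task is an illustrative sketch added to this documentation, not part of the original examples; the data set name SOME.OBSOLETE.DS is a placeholder. + - name: Delete a data set with an IDCAMS DELETE command and return output as text. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_output: + dd_name: sysprint + return_content: + type: text + - dd_input: + dd_name: sysin + content: " DELETE SOME.OBSOLETE.DS"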
+ - name: Drop the contents of an input data set into an output data set using the REPRO command. + zos_mvs_raw: + pgm: idcams + auth: true + dds: + - dd_data_set: + dd_name: INPUT + data_set_name: myhlq.ds1.input + - dd_data_set: + dd_name: OUTPUT + data_set_name: myhlq.ds1.output + - dd_input: + dd_name: sysin + content: | + " REPRO - + INFILE(INPUT) - + OUTFILE(OUTPUT)" + - dd_output: + dd_name: sysprint + return_content: + type: text + + - name: Define a cluster using a literal block style indicator + with a 2 space indentation. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_output: + dd_name: sysprint + return_content: + type: text + - dd_input: + dd_name: sysin + content: |2 + DEFINE CLUSTER - + (NAME(ANSIBLE.TEST.VSAM) - + CYL(10 10) - + FREESPACE(20 20) - + INDEXED - + KEYS(32 0) - + NOERASE - + NONSPANNED - + NOREUSE - + SHAREOPTIONS(3 3) - + SPEED - + UNORDERED - + RECORDSIZE(4086 32600) - + VOLUMES(222222) - + UNIQUE) + + - name: Simple FTP connection using the first and second columns. + zos_mvs_raw: + program_name: AMAPDUPL + auth: true + dds: + - dd_output: + dd_name: sysprint + return_content: + type: text + - dd_data_set: + dd_name: SYSUT1 + data_set_name: myhlq.ds1.output + disposition: shr + - dd_input: + dd_name: sysin + reserved_cols: 0 + content: | + USERID=anonymous + PASSWORD=anonymous + TARGET_SYS=testcase.boulder.ibm.com + TARGET_DSN=wessamp.bigfile + + - name: List data sets matching pattern in catalog, + save output to a new generation of a GDG. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: TEST.CREATION(+1) + disposition: new + return_content: + type: text + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: List data sets matching pattern in catalog, + save output to a GDS already created. + zos_mvs_raw: + program_name: idcams + auth: true + dds: + - dd_data_set: + dd_name: sysprint + data_set_name: TEST.CREATION(-2) + return_content: + type: text + - dd_input: + dd_name: sysin + content: " LISTCAT ENTRIES('SOME.DATASET.*')" + + - name: Recall a migrated data set. + zos_mvs_raw: + program_name: ikjeft01 + auth: true + dds: + - dd_output: + dd_name: systsprt + return_content: + type: text + - dd_input: + dd_name: systsin + content: + - "HRECALL 'MY.DATASET' WAIT" + + + + +Notes +----- + +.. note:: + When executing programs using `zos_mvs_raw <./zos_mvs_raw.html>`_, you may encounter errors that originate in the program's implementation. Two such known issues are noted below, of which one has been addressed with an APAR. + + 1. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking Database Image Copy 2 Utility or Database Recovery Utility in conjunction with FlashCopy or Fast Replication. + + 2. `zos_mvs_raw <./zos_mvs_raw.html>`_ module execution fails when invoking DFSRRC00 with parm "UPB,PRECOMP", "UPB, POSTCOMP" or "UPB,PRECOMP,POSTCOMP". This issue is addressed by APAR PH28089. + + 3. When executing a program, refer to the program's documentation, as each program's requirements can vary from DDs, instream-data indentation and continuation characters. + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_data_set_module` + + + + +Return Values +------------- + + +ret_code + The return code. + + | **returned**: always + | **type**: dict + + code + The return code number returned from the program. + + | **type**: int + + +dd_names + All the DDs related to the program. + + | **returned**: on success + | **type**: list + | **elements**: dict + + dd_name + The data definition name.
+ + | **type**: str + + name + The data set or path name associated with the data definition. + + | **type**: str + + content + The content contained in the data definition. + + | **type**: list + | **elements**: str + + record_count + The lines of the content. + + | **type**: int + + byte_count + The number of bytes in the response content. + + | **type**: int + + +backups + List of any data set backups made during execution. + + | **returned**: always + | **type**: dict + + original_name + The original data set name for which a backup was made. + + | **type**: str + + backup_name + The name of the data set containing the backup of content from data set in original_name. + + | **type**: str + + +stdout + The stdout from a USS command or MVS command, if applicable. + + | **returned**: always + | **type**: str + +stderr + The stderr of a USS command or MVS command, if applicable. + + | **returned**: failure + | **type**: str + diff --git a/docs/source/modules/zos_operator.rst b/docs/source/modules/zos_operator.rst new file mode 100644 index 0000000000..8710256f74 --- /dev/null +++ b/docs/source/modules/zos_operator.rst @@ -0,0 +1,212 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_operator.py + +.. _zos_operator_module: + + +zos_operator -- Execute operator command +======================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Execute an operator command and receive the output. + + + + + +Parameters +---------- + + +cmd + The command to execute. + + If the command contains single-quotations, another set of single quotes must be added. + + For example, change the command "...,P='DSN3EPX,-DBC1,S'" to "...,P=''DSN3EPX,-DBC1,S'' ". + + If the command contains any special characters ($, &, etc), they must be escaped using double backslashes like \\\\\\$. + + For example, to display job by job name the command would be ``cmd:"\\$dj''HELLO''"`` + + By default, the command will be converted to uppercase before execution, to control this behavior, see the *case_sensitive* option below. + + | **required**: True + | **type**: str + + +verbose + Return diagnostic messages that describes the commands execution, options, buffer and response size. + + | **required**: False + | **type**: bool + | **default**: False + + +wait_time_s + Set maximum time in seconds to wait for the commands to execute. + + When set to 0, the system default is used. + + This option is helpful on a busy system requiring more time to execute commands. + + Setting *wait* can instruct if execution should wait the full *wait_time_s*. + + | **required**: False + | **type**: int + | **default**: 1 + + +case_sensitive + If ``true``, the command will not be converted to uppercase before execution. Instead, the casing will be preserved just as it was written in a task. + + | **required**: False + | **type**: bool + | **default**: False + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + - name: Execute an operator command to show device status and allocation + zos_operator: + cmd: 'd u' + + - name: Execute an operator command to show device status and allocation with verbose information + zos_operator: + cmd: 'd u' + verbose: true + + - name: Execute an operator command to purge all job logs (requires escaping) + zos_operator: + cmd: "\\$PJ(*)" + + - name: Execute operator command to show jobs, always waiting 5 seconds for response + zos_operator: + cmd: 'd a,all' + wait_time_s: 5 + + - name: Display the system symbols and associated substitution texts. + zos_operator: + cmd: 'D SYMBOLS' + + + + +Notes +----- + +.. note:: + Commands may need to use specific prefixes like $, they can be discovered by issuing the following command ``D OPDATA,PREFIX``. + + + + + + + +Return Values +------------- + + +rc + Return code for the submitted operator command. + + | **returned**: always + | **type**: int + +cmd + Operator command submitted. + + | **returned**: always + | **type**: str + | **sample**: d u,all + +elapsed + The number of seconds that elapsed waiting for the command to complete. + + | **returned**: always + | **type**: float + | **sample**: + + .. code-block:: json + + 51.53 + +wait_time_s + The maximum time in seconds to wait for the commands to execute. + + | **returned**: always + | **type**: int + | **sample**: 5 + +content + The resulting text from the command submitted. + + | **returned**: on success + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "EC33017A 2022244 16:00:49.00 ISF031I CONSOLE OMVS0000 ACTIVATED", + "EC33017A 2022244 16:00:49.00 -D U,ALL ", + "EC33017A 2022244 16:00:49.00 IEE457I 16.00.49 UNIT STATUS 645", + " UNIT TYPE STATUS VOLSER VOLSTATE SS", + " 0000 3390 F-NRD /RSDNT 0", + " 0001 3211 OFFLINE 0", + " 0002 3211 OFFLINE 0", + " 0003 3211 OFFLINE 0", + " 0004 3211 OFFLINE 0", + " 0005 3211 OFFLINE 0", + " 0006 3211 OFFLINE 0", + " 0007 3211 OFFLINE 0", + " 0008 3211 OFFLINE 0", + " 0009 3277 OFFLINE 0", + " 000C 2540 A 0", + " 000D 2540 A 0", + " 000E 1403 A 0", + " 000F 1403 A 0", + " 0010 3211 A 0", + " 0011 3211 A 0" + ] + +changed + Indicates if any changes were made during module operation. Given operator commands may introduce changes that are unknown to the module. True is always returned unless either a module or command failure has occurred. + + | **returned**: always + | **type**: bool + | **sample**: + + .. code-block:: json + + true + diff --git a/docs/source/modules/zos_operator_action_query.rst b/docs/source/modules/zos_operator_action_query.rst new file mode 100644 index 0000000000..350f87266b --- /dev/null +++ b/docs/source/modules/zos_operator_action_query.rst @@ -0,0 +1,259 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_operator_action_query.py + +.. _zos_operator_action_query_module: + + +zos_operator_action_query -- Display messages requiring action +============================================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Get a list of outstanding messages requiring operator action given one or more conditions. + + + + + +Parameters +---------- + + +system + Return outstanding messages requiring operator action awaiting a reply for a particular system. + + If the system name is not specified, all outstanding messages for that system and for the local systems attached to it are returned. + + A trailing asterisk, (*) wildcard is supported. 
+ + | **required**: False + | **type**: str + + +message_id + Return outstanding messages requiring operator action awaiting a reply for a particular message identifier. + + If the message identifier is not specified, all outstanding messages for all message identifiers are returned. + + A trailing asterisk, (*) wildcard is supported. + + | **required**: False + | **type**: str + + +job_name + Return outstanding messages requiring operator action awaiting a reply for a particular job name. + + If the message job name is not specified, all outstanding messages for all job names are returned. + + A trailing asterisk, (*) wildcard is supported. + + | **required**: False + | **type**: str + + +message_filter + Return outstanding messages requiring operator action awaiting a reply that match a regular expression (regex) filter. + + If the message filter is not specified, all outstanding messages are returned regardless of their content. + + | **required**: False + | **type**: dict + + + filter + Specifies the substring or regex to match to the outstanding messages, see *use_regex*. + + All special characters in a filter string that are not a regex are escaped. + + Valid Python regular expressions are supported. See `the official documentation `_ for more information. + + Regular expressions are compiled with the flag **re.DOTALL** which makes the **'.'** special character match any character including a newline." + + | **required**: True + | **type**: str + + + use_regex + Indicates that the value for *filter* is a regex or a string to match. + + If False, the module assumes that *filter* is not a regex and matches the *filter* substring on the outstanding messages. + + If True, the module creates a regex from the *filter* string and matches it to the outstanding messages. + + | **required**: False + | **type**: bool + | **default**: False + + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Display all outstanding messages issued on system MV2H + zos_operator_action_query: + system: mv2h + + - name: Display all outstanding messages whose job name begin with im5 + zos_operator_action_query: + job_name: im5* + + - name: Display all outstanding messages whose message id begin with dsi* + zos_operator_action_query: + message_id: dsi* + + - name: Display all outstanding messages that have the text IMS READY in them + zos_operator_action_query: + message_filter: + filter: IMS READY + + - name: Display all outstanding messages where the job name begins with 'mq', + message ID begins with 'dsi', on system 'mv29' and which contain the + pattern 'IMS' + zos_operator_action_query: + job_name: mq* + message_id: dsi* + system: mv29 + message_filter: + filter: ^.*IMS.*$ + use_regex: true + + + + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_operator_module` + + + + +Return Values +------------- + + +changed + Indicates if any changes were made during module operation. Given operator action commands query for messages, True is always returned unless either a module or command failure has occurred. 
+ + | **returned**: always + | **type**: bool + +count + The total number of outstanding messages. + + | **returned**: on success + | **type**: int + | **sample**: 12 + +actions + The list of the outstanding messages. + + | **returned**: success + | **type**: list + | **elements**: dict + | **sample**: + + .. code-block:: json + + [ + { + "job_id": "STC01537", + "job_name": "IM5HCONN", + "message_id": "HWSC0000I", + "message_text": "*399 HWSC0000I *IMS CONNECT READY* IM5HCONN", + "number": "001", + "system": "MV27", + "type": "R" + }, + { + "job_id": "STC01533", + "job_name": "IM5HCTRL", + "message_id": "DFS3139I", + "message_text": "*400 DFS3139I IMS INITIALIZED, AUTOMATIC RESTART PROCEEDING IM5H", + "number": "002", + "system": "MV27", + "type": "R" + } + ] + + number + The message identification number. + + | **returned**: on success + | **type**: int + | **sample**: 1 + + type + The action type,'R' means request. + + | **returned**: on success + | **type**: str + | **sample**: R + + system + System on which the outstanding message requiring operator action awaiting a reply. + + | **returned**: on success + | **type**: str + | **sample**: MV27 + + job_id + Job identifier for the outstanding message requiring operator action awaiting a reply. + + | **returned**: on success + | **type**: str + | **sample**: STC01537 + + message_text + Content of the outstanding message requiring operator action awaiting a reply. If *message_filter* is set, *message_text* will be filtered accordingly. + + | **returned**: success + | **type**: str + | **sample**: *399 HWSC0000I *IMS CONNECT READY* IM5HCONN + + job_name + Job name for outstanding message requiring operator action awaiting a reply. + + | **returned**: success + | **type**: str + | **sample**: IM5HCONN + + message_id + Message identifier for outstanding message requiring operator action awaiting a reply. + + | **returned**: success + | **type**: str + | **sample**: HWSC0000I + + diff --git a/docs/source/modules/zos_ping.rst b/docs/source/modules/zos_ping.rst new file mode 100644 index 0000000000..e98f5439a8 --- /dev/null +++ b/docs/source/modules/zos_ping.rst @@ -0,0 +1,88 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_ping.py + +.. _zos_ping_module: + + +zos_ping -- Ping z/OS and check dependencies. +============================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- `zos_ping <./zos_ping.html>`_ verifies the presence of z/OS Web Client Enablement Toolkit, iconv, and Python. +- `zos_ping <./zos_ping.html>`_ returns ``pong`` when the target host is not missing any required dependencies. +- If the target host is missing optional dependencies, the `zos_ping <./zos_ping.html>`_ will return one or more warning messages. +- If a required dependency is missing from the target host, an explanatory message will be returned with the module failure. + + + + + + + +Attributes +---------- +action + | **support**: full + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. 
code-block:: yaml+jinja + + + - name: Ping the z/OS host and perform resource checks + zos_ping: + register: result + + + + +Notes +----- + +.. note:: + This module is written in REXX and relies on the SCP protocol to transfer the source to the managed z/OS node and encode it in the managed node's default encoding, e.g. IBM-1047. Starting with OpenSSH 9.0, SFTP is used by default instead of SCP, meaning transfers are no longer treated as text and are transferred as binary, preserving the source file's encoding and resulting in a module failure. If you are using OpenSSH 9.0 (ssh -V) or later, you can instruct SSH to use SCP by adding the entry ``scp_extra_args="-O"`` into the ini file named ``ansible.cfg``. + + For more information, review the `ansible.builtin.ssh `_ module. + + + + + + + +Return Values +------------- + + +ping + Should contain the value "pong" on success. + + | **returned**: always + | **type**: str + | **sample**: pong + +warnings + List of warnings returned from stderr when performing resource checks. + + | **returned**: failure + | **type**: list + | **elements**: str + diff --git a/docs/source/modules/zos_replace.rst b/docs/source/modules/zos_replace.rst new file mode 100644 index 0000000000..70b2adf2f7 --- /dev/null +++ b/docs/source/modules/zos_replace.rst @@ -0,0 +1,304 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_replace.py + +.. _zos_replace_module: + + +zos_replace -- Replace all instances of a pattern within a file or data set. +============================================================================ + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The module `zos_replace <./zos_replace.html>`_ can replace all instances of a pattern in the contents of a data set. + + + + + +Parameters +---------- + + +after + A regular expression that, if specified, determines which content will be replaced or removed **after** the match. + + Option *after* is the start position from where the module will seek to match the *regexp* pattern. When a pattern is matched, occurrences are substituted with the value set for *replace*. + + If option *after* is not set, the module will search from the beginning of the *target*. + + Option *after* is a regular expression as described in the `Python library `_. + + Option *after* can be used in combination with *before*. When combined with *before*, patterns are replaced or removed from *after* until the value set for *before*. + + Option *after* can be interpreted as a literal string instead of a regular expression by setting option *literal=after*. + + | **required**: False + | **type**: str + + +backup + Specifies whether a backup of the destination should be created before editing the source *target*. + + When set to ``true``, the module creates a backup file or data set. + + The backup file name will be returned if *backup* is ``true`` on either success or failure of module execution such that data can be retrieved. + + | **required**: False + | **type**: bool + | **default**: False + + +backup_name + Specify the USS file name or data set name for the destination backup. + + If *src* is a USS file or path, backup_name must be a file or path name, and it must be an absolute path name. + + If the source is an MVS data set, *backup_name* must be an MVS data set name, and the data set must **not** be preallocated. + + If it is a Generation Data Set (GDS), use a relative positive name, e.g., ``SOME.CREATION(+1)``. + + If *backup_name* is not provided, a default name will be used.
If the source is a USS file or path, the name of the backup file will be the source file or path name appended with a timestamp, e.g. ``/path/file_name.2020-04-23-08-32-29-bak.tar``. + + If *src* is a sequential data set and *backup_name* is not provided, the data set will be backed up to a sequential data set with a randomly generated name. + + If *src* is a data set member and backup_name is not provided, the data set member will be backed up to the same partitioned data set with a randomly generated member name. + + If *src* is a Generation Data Set (GDS) and backup_name is not provided, backup will be a sequential data set. + + | **required**: False + | **type**: str + + +before + A regular expression that, if specified, determines which content will be replaced or removed **before** the match. + + Option *before* is the end position from where the module will seek to match the *regexp* pattern. When a pattern is matched, occurrences are substituted with the value set for *replace*. + + If option *before* is not set, the module will search to the end of the *target*. + + Option *before* is a regular expression as described in the `Python library `_. + + Option *before* can be used in combination with *after*. When combined with *after*, patterns are replaced or removed from *after* until the value set for *before*. + + Option *before* can be interpreted as a literal string instead of a regular expression by setting option *literal=before*. + + | **required**: False + | **type**: str + + +encoding + The character set for data in the *target*. Module `zos_replace <./zos_replace.html>`_ requires the encoding to correctly read the content of a USS file or data set. If this parameter is not provided, this module assumes that USS file or data set is encoded in IBM-1047. + + Supported character sets rely on the charset conversion utility (iconv) version; the most common character sets are supported. + + | **required**: False + | **type**: str + | **default**: IBM-1047 + + +literal + If specified, it enables the module to interpret options *after*, *before* and *regexp* as a literal rather than a regular expression. + + Option *literal* accepts any combination of *after*, *before* and *regexp*. + + To interpret one option as a literal, use *literal=regexp*, *literal=after* or *literal=before*. + + To interpret multiple options as a literal, use a list such as ``['after', 'before']`` or ``['regexp', 'after', 'before']``. + + | **required**: False + | **type**: raw + | **default**: [] + + +target + The location can be a UNIX System Services (USS) file, PS (sequential data set), PDS, PDSE, member of a PDS or PDSE. + + The USS file must be an absolute pathname. + + It is possible to use a generation data set (GDS) relative name for a generation that was already created, e.g. ``SOME.CREATION(-1)``. + + | **required**: True + | **type**: str + + +tmp_hlq + Override the default High Level Qualifier (HLQ) for temporary and backup data sets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the value of ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + +regexp + The regular expression to look for in the contents of the file. + + | **required**: True + | **type**: str + + +replace + The string to replace *regexp* matches with. + + If not set, matches are removed entirely. + + | **required**: False + | **type**: str + + + + + + +Examples +-------- + +.. code-block:: yaml+jinja
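+ + + # The following task is an illustrative sketch added to this documentation, not part of the original examples; the data set name, member name and encoding are placeholders. + - name: Replace a literal string in a PDS member encoded in IBM-037. + zos_replace: + target: SAMPLE.SOURCE(MEM1) + regexp: "SYSOUT=*" + replace: "SYSOUT=A" + literal: regexp + encoding: IBM-037 + + - name: Replace 'profile/' pattern in USS file via blank substitution.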
+ zos_replace: + target: /tmp/src/somefile + regexp: 'profile\/' + + - name: Replace regexp match with blank after line match in USS file. + zos_replace: + target: "/tmp/source" + regexp: '^MOUNTPOINT*' + after: export ZOAU_ROOT + + - name: Replace a specific line with special character on a dataset after a line, treating the text specified + for regexp as a literal string and after as regular expression. + zos_replace: + target: SAMPLE.SOURCE + regexp: //*LIB DD UNIT=SYS,SPACE=(TRK,(1,1)),VOL=SER=vvvvvv + replace: //*LIB DD UNIT=SYS,SPACE=(CYL,(1,1)) + after: '^\$source base \([^\s]+\)' + literal: regexp + + - name: Replace a specific line with special character on a dataset after a line, treating the text specified + for regexp and after as regular expression. + zos_replace: + target: SAMPLE.SOURCE + regexp: '\ \*\*LIB\ \ DD\ UNIT=SYS,SPACE=\(TRK,\(1,1\)\),VOL=SER=vvvvvv' + replace: //*LIB DD UNIT=SYS,SPACE=(CYL,(1,1)) + after: '^\$source base \([^\s]+\)' + literal: regexp + + - name: Replace a specific line before a specific sentence with backup, treating the text specified for regexp and before as literal strings. + zos_replace: + target: SAMPLE.SOURCE + backup: true + regexp: //SYSPRINT DD SYSOUT=* + before: SAMPLES OUTPUT SYSIN *=$DSN + literal: + - regexp + - before + + - name: Replace a specific line before a specific sentence with backup, treating the text specified for regexp and before as regular expression. + zos_replace: + target: SAMPLE.SOURCE + backup: true + regexp: '\ //SYSPRINT\ DD\ SYSOUT=\*' + before: '\ SAMPLES OUTPUT SYSIN\ \*\=\$DSN' + + - name: Replace 'var' with 'vars' between matched lines after and before with backup. + zos_replace: + target: SAMPLE.DATASET + tmp_hlq: ANSIBLE + backup: true + backup_name: BACKUP.DATASET + regexp: var + replace: vars + after: ^/tmp/source* + before: ^ if* + + - name: Replace lines on a GDS and generate a backup on the same GDG. + zos_replace: + target: SOURCE.GDG(0) + regexp: ^(IEE132I|IEA989I|IEA888I|IEF196I|IEA000I)\s.* + after: ^IEE133I PENDING * + before: ^IEE252I DEVICE * + backup: true + backup_name: "SOURCE.GDG(+1)" + + - name: Delete 'SYSTEM' calls via backref between matched lines in a PDS member. + zos_replace: + target: PDS.SOURCE(MEM) + regexp: '^(.*?SYSTEM.*?)SYSTEM(.*)' + replace: '\1\2' + after: IEE133I PENDING * + before: IEF456I JOB12345 * + + + + +Notes +----- + +.. note:: + For supported character sets used to encode data, refer to the `documentation `_. + + + + + + + +Return Values +------------- + + +backup_name + Name of the backup file or data set that was created. + + | **returned**: if backup=true + | **type**: str + | **sample**: /path/to/file.txt.2015-02-03@04:15 + +changed + Indicates if the source was modified. + + | **returned**: always + | **type**: bool + | **sample**: + + .. code-block:: json + + true + +found + Number of matches found + + | **returned**: success + | **type**: int + | **sample**: 5 + +msg + A string with a generic or error message relayed to the user. + + | **returned**: failure + | **type**: str + | **sample**: Parameter verification failed + +replaced + Fragment of the file that was changed + + | **returned**: always + | **type**: str + | **sample**: IEE134I TRACE DISABLED - MONITORING STOPPED + +target + The data set name or USS path that was modified. 
+ + | **returned**: always + | **type**: str + | **sample**: ANSIBLE.USER.TEXT + diff --git a/docs/source/modules/zos_script.rst b/docs/source/modules/zos_script.rst new file mode 100644 index 0000000000..e85fdb14f0 --- /dev/null +++ b/docs/source/modules/zos_script.rst @@ -0,0 +1,419 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_script.py + +.. _zos_script_module: + + +zos_script -- Run scripts in z/OS +================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The `zos_script <./zos_script.html>`_ module runs a local or remote script in the remote machine. + + + + + +Parameters +---------- + + +chdir + Change the script's working directory to this path. + + When not specified, the script will run in the user's home directory on the remote machine. + + | **required**: False + | **type**: str + + +cmd + Path to the local or remote script followed by optional arguments. + + If the script path contains spaces, make sure to enclose it in two pairs of quotes. + + Arguments may need to be escaped so the shell in the remote machine handles them correctly. + + | **required**: True + | **type**: str + + +creates + Path to a file in the remote machine. If it exists, the script will not be executed. + + | **required**: False + | **type**: str + + +encoding + Specifies which encodings the script should be converted from and to. + + If ``encoding`` is not provided, the module determines which local and remote charsets to convert the data from and to. + + | **required**: False + | **type**: dict + + + from + The encoding to be converted from. + + | **required**: True + | **type**: str + + + to + The encoding to be converted to. + + | **required**: True + | **type**: str + + + +executable + Path of an executable in the remote machine to invoke the script with. + + When not specified, the system will assume the script is interpreted REXX and try to run it as such. Make sure to include a comment identifying the script as REXX at the start of the file in this case. + + | **required**: False + | **type**: str + + +remote_src + If set to ``false``, the module will search the script in the controller. + + If set to ``true``, the module will search the script in the remote machine. + + | **required**: False + | **type**: bool + + +removes + Path to a file in the remote machine. If it does not exist, the script will not be executed. + + | **required**: False + | **type**: str + + +use_template + Whether the module should treat ``src`` as a Jinja2 template and render it before continuing with the rest of the module. + + Only valid when ``src`` is a local file or directory. + + All variables defined in inventory files, vars files and the playbook will be passed to the template engine, as well as `Ansible special variables `_, such as ``playbook_dir``, ``ansible_version``, etc. + + If variables defined in different scopes share the same name, Ansible will apply variable precedence to them. You can see the complete precedence order `in Ansible's documentation `_ + + | **required**: False + | **type**: bool + | **default**: False + + +template_parameters + Options to set the way Jinja2 will process templates. + + Jinja2 already sets defaults for the markers it uses, you can find more information at its `official documentation `_. + + These options are ignored unless ``use_template`` is true. 
+ + | **required**: False + | **type**: dict + + + variable_start_string + Marker for the beginning of a statement to print a variable in Jinja2. + + | **required**: False + | **type**: str + | **default**: {{ + + + variable_end_string + Marker for the end of a statement to print a variable in Jinja2. + + | **required**: False + | **type**: str + | **default**: }} + + + block_start_string + Marker for the beginning of a block in Jinja2. + + | **required**: False + | **type**: str + | **default**: {% + + + block_end_string + Marker for the end of a block in Jinja2. + + | **required**: False + | **type**: str + | **default**: %} + + + comment_start_string + Marker for the beginning of a comment in Jinja2. + + | **required**: False + | **type**: str + | **default**: {# + + + comment_end_string + Marker for the end of a comment in Jinja2. + + | **required**: False + | **type**: str + | **default**: #} + + + line_statement_prefix + Prefix used by Jinja2 to identify line-based statements. + + | **required**: False + | **type**: str + + + line_comment_prefix + Prefix used by Jinja2 to identify comment lines. + + | **required**: False + | **type**: str + + + lstrip_blocks + Whether Jinja2 should strip leading spaces from the start of a line to a block. + + | **required**: False + | **type**: bool + | **default**: False + + + trim_blocks + Whether Jinja2 should remove the first newline after a block is removed. + + Setting this option to ``False`` will result in newlines being added to the rendered template. This could create invalid code when working with JCL templates or empty records in destination data sets. + + | **required**: False + | **type**: bool + | **default**: True + + + keep_trailing_newline + Whether Jinja2 should keep the first trailing newline at the end of a template after rendering. + + | **required**: False + | **type**: bool + | **default**: False + + + newline_sequence + Sequence that starts a newline in a template. + + | **required**: False + | **type**: str + | **default**: \\n + | **choices**: \\n, \\r, \\r\\n + + auto_reload + Whether to reload a template file when it has changed after the task has started. + + | **required**: False + | **type**: bool + | **default**: False + + + autoescape + Whether to enable autoescape of XML/HTML elements on a template. + + | **required**: False + | **type**: bool + | **default**: True + + + + + +Attributes +---------- +action + | **support**: full + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. +diff_mode + | **support**: none + | **description**: Will return details on what has changed (or possibly needs changing in check_mode), when in diff mode. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Run a local REXX script on the managed z/OS node. + zos_script: + cmd: ./scripts/HELLO + + - name: Run a local REXX script with args on the managed z/OS node. + zos_script: + cmd: ./scripts/ARGS "1,2" + + - name: Run a remote REXX script while changing its working directory. 
+ zos_script: + cmd: /u/user/scripts/ARGS "1,2" + remote_src: true + chdir: /u/user/output_dir + + - name: Run a local Python script in the temporary directory specified in the Ansible environment variable 'remote_tmp'. + zos_script: + cmd: ./scripts/program.py + executable: /usr/bin/python3 + + - name: Run a local script made from a template. + zos_script: + cmd: ./templates/PROGRAM + use_template: true + + - name: Run a script only when a file is not present. + zos_script: + cmd: ./scripts/PROGRAM + creates: /u/user/pgm_result.txt + + - name: Run a script only when a file is already present on the remote machine. + zos_script: + cmd: ./scripts/PROGRAM + removes: /u/user/pgm_input.txt + + - name: Run a shell script on the remote system + zos_script: + cmd: ./scripts/program.sh + executable: /bin/sh + remote_src: true + + + + +Notes +----- + +.. note:: + When executing local scripts, temporary storage will be used on the remote z/OS system. The size of the temporary storage will correspond to the size of the file being copied. + + The location in the z/OS system where local scripts will be copied to can be configured through Ansible's ``remote_tmp`` option. Refer to `Ansible's documentation `_ for more information. + + All local scripts copied to a remote z/OS system will be removed from the managed node before the module finishes executing. + + Execution permissions for the group assigned to the script will be added to remote scripts if they are missing. The original permissions for remote scripts will be restored by the module before the task ends. + + The module will only add execution permissions for the file owner. + + If executing REXX scripts, make sure to include a newline character on each line of the file. Otherwise, the interpreter may fail and return error ``BPXW0003I``. + + For supported character sets used to encode data, refer to the `documentation `_. + + This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail. + + This module executes scripts inside z/OS UNIX System Services. For running REXX scripts contained in data sets or CLISTs, consider issuing a TSO command with `zos_tso_command <./zos_tso_command.html>`_. + + The community script module does not rely on Python to execute scripts on a managed node, while this module does. Python must be present on the remote machine. + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_copy_module` + - :ref:`zos_tso_command_module` + + + + +Return Values +------------- + + +cmd + Original command issued by the user. + + | **returned**: changed + | **type**: str + | **sample**: ./scripts/PROGRAM + +remote_cmd + Command executed on the remote machine. Will show the executable path used, and when running local scripts, will also show the temporary file used. + + | **returned**: changed + | **type**: str + | **sample**: /tmp/zos_script.jycqqfny.ARGS 1,2 + +msg + Failure or skip message returned by the module. + + | **returned**: failure or skipped + | **type**: str + | **sample**: File /u/user/file.txt is already missing on the system, skipping script + +rc + Return code of the script. 
+ + | **returned**: changed + | **type**: int + | **sample**: 16 + +stdout + The STDOUT from the script, which may be empty. + + | **returned**: changed + | **type**: str + | **sample**: Allocation to SYSEXEC completed. + +stderr + The STDERR from the script, which may be empty. + + | **returned**: changed + | **type**: str + | **sample**: An error has occurred. + +stdout_lines + List of strings containing individual lines from STDOUT. + + | **returned**: changed + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "Allocation to SYSEXEC completed." + ] + +stderr_lines + List of strings containing individual lines from STDERR. + + | **returned**: changed + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "An error has occurred" + ] + diff --git a/docs/source/modules/zos_started_task.rst b/docs/source/modules/zos_started_task.rst new file mode 100644 index 0000000000..ca59f32ee3 --- /dev/null +++ b/docs/source/modules/zos_started_task.rst @@ -0,0 +1,530 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_started_task.py + +.. _zos_started_task_module: + + +zos_started_task -- Perform operations on started tasks. +======================================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Start, display, modify, cancel, force and stop a started task. + + + + + +Parameters +---------- + + +arm + *arm* indicates to execute normal task termination routines without causing address space destruction. + + Only applicable when *state* is ``forced``, otherwise ignored. + + | **required**: False + | **type**: bool + + +armrestart + Indicates that the batch job or started task should be automatically restarted after CANCEL or FORCE completes, if it is registered as an element of the automatic restart manager. If the job or task is not registered or if you do not specify this parameter, MVS will not automatically restart the job or task. + + Only applicable when *state* is ``cancelled`` or ``forced``, otherwise ignored. + + | **required**: False + | **type**: bool + + +asidx + When *state* is ``cancelled``, ``stopped`` or ``forced``, *asidx* is the hexadecimal address space identifier of the work unit you want to cancel, stop or force. + + Only applicable when *state* is ``stopped``, ``cancelled``, or ``forced``, otherwise ignored. + + | **required**: False + | **type**: str + + +dump + Whether to perform a dump. The type of dump (SYSABEND, SYSUDUMP, or SYSMDUMP) depends on the JCL for the job. + + Only applicable when *state* is ``cancelled``, otherwise ignored. + + | **required**: False + | **type**: bool + + +identifier_name + Option *identifier_name* is the name that identifies the task. This name can be up to 8 characters long. The first character must be alphabetical. + + | **required**: False + | **type**: str + + +system_logs + When ``system_logs=true``, the module will return system logs that describe the task's execution. This option can return a large response depending on system load, and it could also surface other programs' activity. + + It is not recommended to have this option on all the time, but rather use it as a debugging option. + + | **required**: False + | **type**: bool + | **default**: False + + +job_account + Specifies accounting data in the JCL JOB statement for the started task. If the source JCL already had accounting data, the value that is specified on this parameter overrides it. + + Only applicable when *state* is ``started``, otherwise ignored.
+ + | **required**: False + | **type**: str + + +job_name + When *state* is started, this is the name which will be assigned to a started task while starting it. If *job_name* is not specified, then *member_name* is used as job's name. + + When *state* is ``displayed``, ``modified``, ``cancelled``, ``stopped``, or ``forced``, *job_name* is the started task name used to query the system. + + | **required**: False + | **type**: str + + +keyword_parameters + Any appropriate keyword parameter that you specify to override the corresponding parameter in the cataloged procedure. The maximum length of each keyword=option pair is 66 characters. No individual value within this field can be longer than 44 characters in length. + + Only applicable when *state* is ``started``, otherwise ignored. + + | **required**: False + | **type**: dict + + +member_name + Name of a member of a partitioned data set that contains the source JCL for the task to be started. The member can be either a job or a cataloged procedure. + + *member_name* is mandatory and only applicable when *state* is ``started``, otherwise ignored. + + | **required**: False + | **type**: str + + +parameters + Program parameters passed to the started program. + + Only applicable when *state* is ``started`` or ``modified``, otherwise ignored. + + For example, REFRESH or REPLACE parameters can be passed while modifying a started task. + + | **required**: False + | **type**: list + | **elements**: str + + +reusable_asid + When *reusable_asid* is ``True`` and REUSASID(YES) is specified in the DIAGxx parmlib member, a reusable ASID is assigned to the address space created by the START command. If *reusable_asid* is not specified or REUSASID(NO) is specified in DIAGxx, an ordinary ASID is assigned. + + Only applicable when *state* is ``started``, otherwise ignored. + + | **required**: False + | **type**: bool + + +state + *state* is the desired state of the started task after the module is executed. + + If *state* is ``started`` and the respective member is not present on the managed node, then error will be thrown with ``rc=1``, ``changed=false`` and *stderr* which contains error details. + + If *state* is ``cancelled``, ``modified``, ``displayed``, ``stopped`` or ``forced`` and the started task is not running on the managed node, then error will be thrown with ``rc=1``, ``changed=false`` and *stderr* contains error details. + + If *state* is ``displayed`` and the started task is running, then the module will return the started task details along with ``changed=true``. + + | **required**: True + | **type**: str + | **choices**: started, displayed, modified, cancelled, stopped, forced + + +subsystem + The name of the subsystem that selects the task for processing. The name must be 1-4 characters long, which are defined in the IEFSSNxx parmlib member, and the subsystem must be active. + + Only applicable when *state* is ``started``, otherwise ignored. + + | **required**: False + | **type**: str + + +task_id + A unique system-generated identifier that represents a specific started task running in z/OS. This id starts with STC. + + Only applicable when *state* is ``displayed``, ``modified``, ``cancelled``, ``stopped``, or ``forced``, otherwise ignored. + + | **required**: False + | **type**: str + + +user_id + The user ID of the time-sharing user you want to cancel or force. + + Only applicable when *state* is ``cancelled`` or ``forced``, otherwise ignored. 
+
+  | **required**: False
+  | **type**: str
+
+
+verbose
+  When ``verbose=true``, the module will return the started task execution logs.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+wait_full_time
+  For a started task that takes time to initialize, *wait_time* with ``wait_full_time=true`` ensures the started task completes initialization and JES updates the system control blocks.
+
+  If ``wait_full_time=false``, the module polls every 5 seconds to check the status of the started task and returns immediately once the task is successfully validated.
+
+  When ``wait_full_time=true``, the module waits for the duration specified in *wait_time*, even after the started task operation has been successfully validated.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+wait_time
+  Total time that the module will wait for a submitted task, measured in seconds. The time begins when the module is executed on the managed node.
+
+  The default value is 10 seconds if this value is not specified, or if the specified value is less than 10.
+
+  | **required**: False
+  | **type**: int
+  | **default**: 10
+
+
+
+
+Attributes
+----------
+action
+  | **support**: none
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: full
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+   - name: Start a started task using a member in a partitioned data set.
+     zos_started_task:
+       state: "started"
+       member: "PROCAPP"
+
+   - name: Start a started task using a member name and giving it an identifier.
+     zos_started_task:
+       state: "started"
+       member: "PROCAPP"
+       identifier: "SAMPLE"
+
+   - name: Start a started task using both a member and a job name.
+     zos_started_task:
+       state: "started"
+       member: "PROCAPP"
+       job_name: "SAMPLE"
+
+   - name: Start a started task and enable verbose output.
+     zos_started_task:
+       state: "started"
+       member: "PROCAPP"
+       job_name: "SAMPLE"
+       verbose: True
+
+   - name: Start a started task and wait for 30 seconds before fetching task details.
+     zos_started_task:
+       state: "started"
+       member: "PROCAPP"
+       verbose: True
+       wait_time: 30
+       wait_full_time: True
+
+   - name: Start a started task specifying the subsystem and enabling a reusable ASID.
+     zos_started_task:
+       state: "started"
+       member: "PROCAPP"
+       subsystem: "MSTR"
+       reusable_asid: "YES"
+
+   - name: Display a started task using a started task name.
+     zos_started_task:
+       state: "displayed"
+       task_name: "PROCAPP"
+
+   - name: Display a started task using a started task id.
+     zos_started_task:
+       state: "displayed"
+       task_id: "STC00012"
+
+   - name: Display all started tasks that begin with an 's' using a wildcard.
+     zos_started_task:
+       state: "displayed"
+       task_name: "s*"
+
+   - name: Display all started tasks.
+     zos_started_task:
+       state: "displayed"
+       task_name: "all"
+
+   - name: Cancel a started task using a task name.
+     zos_started_task:
+       state: "cancelled"
+       task_name: "SAMPLE"
+
+   - name: Cancel a started task using a started task id.
+     zos_started_task:
+       state: "cancelled"
+       task_id: "STC00093"
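+
+   # The following example is a sketch added for illustration and is not part
+   # of the original documentation. It assumes the documented 'dump' and
+   # 'armrestart' options for state 'cancelled' and follows the parameter
+   # spelling used by the surrounding examples.
+   - name: Cancel a started task, requesting a dump and an ARM restart.
+     zos_started_task:
+       state: "cancelled"
+       task_name: "SAMPLE"
+       dump: true
+       armrestart: true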
+
+   - name: Cancel a started task using its task name and ASID.
+     zos_started_task:
+       state: "cancelled"
+       task_name: "SAMPLE"
+       asidx: 0014
+
+   - name: Modify a started task's parameters.
+     zos_started_task:
+       state: "modified"
+       task_name: "SAMPLE"
+       parameters: ["XX=12"]
+
+   - name: Modify a started task's parameters using a started task id.
+     zos_started_task:
+       state: "modified"
+       task_id: "STC00034"
+       parameters: ["XX=12"]
+
+   - name: Stop a started task using its task name.
+     zos_started_task:
+       state: "stopped"
+       task_name: "SAMPLE"
+
+   - name: Stop a started task using a started task id.
+     zos_started_task:
+       state: "stopped"
+       task_id: "STC00087"
+
+   - name: Stop a started task using its task name, identifier and ASID.
+     zos_started_task:
+       state: "stopped"
+       task_name: "SAMPLE"
+       identifier: "SAMPLE"
+       asidx: 00A5
+
+   - name: Force a started task using its task name.
+     zos_started_task:
+       state: "forced"
+       task_name: "SAMPLE"
+
+   - name: Force a started task using its task id.
+     zos_started_task:
+       state: "forced"
+       task_id: "STC00065"
+
+
+
+
+
+
+
+
+
+
+Return Values
+-------------
+
+
+changed
+  True if the state was changed, otherwise False.
+
+  | **returned**: always
+  | **type**: bool
+
+cmd
+  Command executed via opercmd to achieve the desired state.
+
+  | **returned**: changed
+  | **type**: str
+  | **sample**: S SAMPLE
+
+msg
+  Failure or skip message returned by the module.
+
+  | **returned**: failure or skipped
+  | **type**: str
+  | **sample**: Command parameters are invalid.
+
+rc
+  The return code is 0 when the command executes successfully.
+
+  The return code is 1 when opercmd throws any error.
+
+  The return code is 4 when the task_id format is invalid.
+
+  The return code is 5 when any parameter validation fails.
+
+  The return code is 8 when the started task is not found using task_id.
+
+  | **returned**: changed
+  | **type**: int
+
+state
+  The final state of the started task, after execution.
+
+  | **returned**: success
+  | **type**: str
+  | **sample**: S SAMPLE
+
+stderr
+  The STDERR from the command, may be empty.
+
+  | **returned**: failure
+  | **type**: str
+  | **sample**: An error has occurred.
+
+stderr_lines
+  List of strings containing individual lines from STDERR.
+
+  | **returned**: failure
+  | **type**: list
+  | **sample**:
+
+    .. code-block:: json
+
+        [
+            "An error has occurred"
+        ]
+
+stdout
+  The STDOUT from the command, may be empty.
+
+  | **returned**: success
+  | **type**: str
+  | **sample**: ISF031I CONSOLE OMVS0000 ACTIVATED.
+
+stdout_lines
+  List of strings containing individual lines from STDOUT.
+
+  | **returned**: success
+  | **type**: list
+  | **sample**:
+
+    .. code-block:: json
+
+        [
+            "Allocation to SYSEXEC completed."
+        ]
+
+tasks
+  The output information for a list of started tasks matching specified criteria.
+
+  If no started task is found, this will be empty.
+
+  | **returned**: success
+  | **type**: list
+  | **elements**: dict
+
+  asidx
+    Address space identifier (ASID), in hexadecimal.
+
+    | **type**: str
+    | **sample**: 44
+
+  cpu_time
+    The processor time used by the address space, including the initiator. This time does not include SRB time.
+
+    *cpu_time* format is ``hhhhh.mm.ss.SSS`` (hours.minutes.seconds.milliseconds).
+
+    ``********`` when time exceeds 100000 hours.
+
+    ``NOTAVAIL`` when the TOD clock is not working.
+
+    | **type**: str
+    | **sample**: 00000.00.00.003
+
+  elapsed_time
+    For address spaces other than system address spaces, this value represents the elapsed time since the task was selected for execution.
+
+    For system address spaces created before master scheduler initialization, this value represents the elapsed time since the master scheduler was initialized.
+
+    For system address spaces created after master scheduler initialization, this value represents the elapsed time since the system address space was created.
+
+    *elapsed_time* format is ``hhhhh.mm.ss.SSS`` (hours.minutes.seconds.milliseconds).
+
+    ``********`` when time exceeds 100000 hours.
+
+    ``NOTAVAIL`` when the TOD clock is not working.
+
+    | **type**: str
+    | **sample**: 00003.20.23.013
+
+  started_time
+    The time when the started task started.
+
+    ``********`` when time exceeds 100000 hours.
+
+    ``NOTAVAIL`` when the TOD clock is not working.
+
+    | **type**: str
+    | **sample**: 2025-09-11 18:21:50.293644+00:00
+
+  task_id
+    The started task id.
+
+    | **type**: str
+    | **sample**: STC00018
+
+  task_identifier
+    The name of a system address space.
+
+    The name of a step, for a job or attached APPC transaction program attached by an initiator.
+
+    The identifier of a task created by the START command.
+
+    The name of a step that called a cataloged procedure.
+
+    ``STARTING`` if initiation of a started job, system task, or attached APPC transaction program is incomplete.
+
+    ``*MASTER*`` for the master address space.
+
+    The name of an initiator address space.
+
+    | **type**: str
+    | **sample**: SPROC
+
+  task_name
+    The name of the started task.
+
+    | **type**: str
+    | **sample**: SAMPLE
+
+
+verbose_output
+  If ``verbose=true``, the system logs related to the executed state of the started task will be shown.
+
+  | **returned**: success
+  | **type**: str
+  | **sample**: 04.33.04 STC00077 ---- SUNDAY, 12 OCT 2025 ----....
+
diff --git a/docs/source/modules/zos_stat.rst b/docs/source/modules/zos_stat.rst
new file mode 100644
index 0000000000..4c1aa5b19f
--- /dev/null
+++ b/docs/source/modules/zos_stat.rst
@@ -0,0 +1,1326 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_stat.py
+
+.. _zos_stat_module:
+
+
+zos_stat -- Retrieve facts from MVS data sets, USS files, aggregates and generation data groups
+===============================================================================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- The `zos_stat <./zos_stat.html>`_ module retrieves facts from resources stored in a z/OS system.
+- Resources that can be queried are UNIX System Services files, data sets, generation data groups and aggregates.
+
+
+
+
+
+Parameters
+----------
+
+
+name
+  Name of a data set, generation data group (GDG), aggregate, or a UNIX System Services file path, to query.
+
+  Data sets can be sequential, partitioned (PDS), partitioned extended (PDSE), VSAMs or generation data sets (GDS).
+
+  This option doesn't accept the use of wildcards (``?`` and ``*``).
+
+  | **required**: True
+  | **type**: str
+
+
+volumes
+  Name(s) of the volume(s) where the data set will be searched.
+
+  If omitted, the module will look up the master catalog to find all volumes where a data set is allocated.
+
+  When used, if the data set is not found in at least one volume from the list, the module will fail with a "data set not found" message.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+
+
+type
+  Type of resource to query.
+
+  | **required**: False
+  | **type**: str
+  | **default**: data_set
+  | **choices**: data_set, file, aggregate, gdg
+
+
+sms_managed
+  Whether the data set is managed by the Storage Management Subsystem.
+ + It will cause the module to retrieve additional information, may take longer to query all attributes of a data set. + + If the data set is a PDSE and the Ansible user has RACF READ authority on it, retrieving SMS information will update the last referenced date of the data set. + + If the system finds the data set is not actually managed by SMS, the rest of the attributes will still be queried and this will be noted in the output from the task. + + | **required**: False + | **type**: bool + | **default**: False + + +recall + Whether to recall a migrated data set to fully query its attributes. + + If set to ``false``, the module will return a limited amount of information for a migrated data set. + + Recalling a data set will make the module take longer to execute. + + Ignored when the data set is not found to be migrated. + + The data set will not be migrated again afterwards. + + The data set will not get recalled when running the module in check mode. + + | **required**: False + | **type**: bool + | **default**: False + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary data sets. + + The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + +follow + Whether to follow symlinks when querying files. + + | **required**: False + | **type**: bool + | **default**: False + + +get_mime + Whether to get information about the nature of a file, such as the charset and type of media it represents. + + | **required**: False + | **type**: bool + | **default**: True + + +get_checksum + Whether to compute a file's checksum and return it. Otherwise ignored. + + | **required**: False + | **type**: bool + | **default**: True + + +checksum_algorithm + Algorithm used to compute a file's checksum. + + Will throw an error if the managed node is unable to use the specified algorithm. + + | **required**: False + | **type**: str + | **default**: sha1 + | **choices**: md5, sha1, sha224, sha256, sha384, sha512 + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Get the attributes of a sequential data set. + zos_stat: + name: USER.SEQ.DATA + type: data_set + + - name: Get the attributes of a sequential data set on volume '000000'. + zos_stat: + name: USER.SEQ.DATA + type: data_set + volume: "000000" + + - name: Get the attributes of a sequential data set allocated on multiple volumes. + zos_stat: + name: USER.SEQ.DATA + type: data_set + volumes: + - "000000" + - "222222" + + - name: Get the attributes of a PDSE managed by SMS. + zos_stat: + name: USER.PDSE.DATA + type: data_set + sms_managed: true + + - name: Get the attributes of a sequential data set with a non-default temporary HLQ. + zos_stat: + name: USER.SEQ.DATA + type: data_set + tmp_hlq: "RESTRICT" + + - name: Get the attributes of a generation data group. + zos_stat: + name: "USER.GDG.DATA" + type: gdg + + - name: Get the attributes of a generation data set. 
+ zos_stat: + name: "USER.GDG.DATA(-1)" + type: data_set + + - name: Get the attributes of an aggregate. + zos_stat: + name: "HLQ.USER.ZFS.DATA" + type: aggregate + + - name: Get the attributes of a file inside Unix System Services. + zos_stat: + name: "/u/user/file.txt" + type: file + get_checksum: true + + + + +Notes +----- + +.. note:: + When querying data sets, the module will create two temporary data sets. One requires around 4 kilobytes of available space on the managed node. The second one, around 1 kilobyte of available space. Both data sets will be removed before the module finishes execution. + + Sometimes, the system could be unable to properly determine the organization or record format of the data set or the space units used to represent its allocation. When this happens, the values for these fields will be null. + + When querying a partitioned data set (PDS), if the Ansible user has RACF READ authority on it, the last referenced date will be updated by the query operation. + + + +See Also +-------- + +.. seealso:: + + - :ref:`ansible.builtin.stat_module` + - :ref:`zos_find_module` + - :ref:`zos_gather_facts_module` + + + + +Return Values +------------- + + +stat + Dictionary containing information about the resource. + + Attributes that don't apply to the current resource will still be present on the dictionary with null values, so as to not break automation that relies on certain fields to be available. + + | **returned**: success + | **type**: dict + + name + Name of the resource queried. + + For Generation Data Sets (GDSs), this will be the absolute name. + + | **returned**: success + | **type**: str + | **sample**: USER.SEQ.DATA.SET + + resource_type + One of 'data_set', 'gdg', 'file' or 'aggregate'. + + | **returned**: success + | **type**: str + | **sample**: data_set + + exists + Whether name was found on the managed node. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + isfile + Whether name is a Unix System Services file. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + isdataset + Whether name is a data set. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + isaggregate + Whether name is an aggregate. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + isgdg + Whether name is a Generation Data Group. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + attributes + Dictionary containing all the stat data. + + | **returned**: success + | **type**: dict + + dsorg + Data set organization. + + | **returned**: success + | **type**: str + | **sample**: ps + + type + Type of the data set. + + | **returned**: success + | **type**: str + | **sample**: library + + record_format + Record format of a data set. + + | **returned**: success + | **type**: str + | **sample**: vb + + record_length + Record length of a data set. + + | **returned**: success + | **type**: int + | **sample**: 80 + + block_size + Block size of a data set. + + | **returned**: success + | **type**: int + | **sample**: 27920 + + has_extended_attrs + Whether a data set has extended attributes set. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + extended_attrs_bits + Current values of the EATTR bits for a data set. 
+ + For files, it shows the current values of the extended attributes bits as a group of 4 characters. + + | **returned**: success + | **type**: str + | **sample**: opt + + creation_date + Date a data set was created. + + | **returned**: success + | **type**: str + | **sample**: 2025-01-27 + + creation_time + Time at which a data set was created. + + Only available when a data set has extended attributes. + + | **returned**: success + | **type**: str + | **sample**: 11:25:52 + + expiration_date + Expiration date of a data set. + + | **returned**: success + | **type**: str + | **sample**: 2030-12-31 + + last_reference + Date where the data set was last referenced. + + | **returned**: success + | **type**: str + | **sample**: 2025-01-28 + + updated_since_backup + Whether the data set has been updated since its last backup. + + | **returned**: success + | **type**: bool + + jcl_attrs + Dictionary containing the names of the JCL job and step that created a data set. + + Only available for data sets with extended attributes. + + | **returned**: success + | **type**: dict + + creation_job + JCL job that created the data set. + + | **returned**: success + | **type**: str + | **sample**: DSALLOC + + creation_step + JCL job step that created the data set. + + | **returned**: success + | **type**: str + | **sample**: ALLOC + + + volser + Name of the volume containing the data set. + + | **returned**: success + | **type**: str + | **sample**: 000000 + + num_volumes + Number of volumes where the data set resides. + + | **returned**: success + | **type**: int + | **sample**: 1 + + volumes + Names of the volumes where the data set resides. + + | **returned**: success + | **type**: list + | **elements**: str + | **sample**: + + .. code-block:: json + + [ + "000000", + "SCR03" + ] + + missing_volumes + When using the ``volumes`` option, this field will contain every volume specified in a task where the data set was missing. Will be an empty list in any other case. + + | **returned**: success + | **type**: list + | **elements**: str + | **sample**: + + .. code-block:: json + + [ + "222222", + "AUXVOL" + ] + + device_type + Generic device type where the data set resides. + + | **returned**: success + | **type**: str + | **sample**: 3390 + + space_units + Units used to describe sizes for the data set. + + | **returned**: success + | **type**: str + | **sample**: track + + primary_space + Primary allocation. + + Uses the space units defined in space_units. + + | **returned**: success + | **type**: int + | **sample**: 93 + + secondary_space + Secondary allocation. + + Uses the space units defined in space_units. + + | **returned**: success + | **type**: int + | **sample**: 56 + + allocation_available + Total allocation of the data set. + + Uses the space units defined in space_units. + + | **returned**: success + | **type**: int + | **sample**: 93 + + allocation_used + Total allocation used by the data set. + + Uses the space units defined in space_units. + + | **returned**: success + | **type**: int + + extents_allocated + Number of extents allocated for the data set. + + | **returned**: success + | **type**: int + | **sample**: 1 + + extents_used + Number of extents used by the data set. + + For PDSEs, this value will be null. See instead pages_used and perc_pages_used. + + | **returned**: success + | **type**: int + | **sample**: 1 + + blocks_per_track + Blocks per track for the unit contained in space_units. 
+ + | **returned**: success + | **type**: int + | **sample**: 2 + + tracks_per_cylinder + Tracks per cylinder for the unit contained in space_units. + + | **returned**: success + | **type**: int + | **sample**: 15 + + sms_data_class + The SMS data class name. + + Only returned when the data set is managed by SMS and sms_managed is set to true. + + | **returned**: success + | **type**: str + | **sample**: standard + + sms_mgmt_class + The SMS management class name. + + Only returned when the data set is managed by SMS and sms_managed is set to true. + + | **returned**: success + | **type**: str + | **sample**: vsam + + sms_storage_class + The SMS storage class name. + + Only returned when the data set is managed by SMS and sms_managed is set to true. + + | **returned**: success + | **type**: str + | **sample**: fast + + encrypted + Whether the data set is encrypted. + + | **returned**: success + | **type**: bool + + key_status + Whether the data set has a password set to read/write. + + Value can be either one of 'none', 'read' or 'write'. + + For VSAMs, the value can also be 'supp', when the module is unable to query its security attributes. + + | **returned**: success + | **type**: str + | **sample**: none + + racf + Whether there is RACF protection set on the data set. + + Value can be either one of 'none', 'generic' or 'discrete' for non-VSAM data sets. + + For VSAMs, the value can be either 'yes' or 'no'. + + | **returned**: success + | **type**: str + | **sample**: none + + key_label + The encryption key label for an encrypted data set. + + | **returned**: success + | **type**: str + | **sample**: keydsn + + dir_blocks_allocated + Number of directory blocks allocated for a PDS. + + For PDSEs, this value will be null. See instead pages_used and perc_pages_used. + + | **returned**: success + | **type**: int + | **sample**: 5 + + dir_blocks_used + Number of directory blocks used by a PDS. + + For PDSEs, this value will be null. See instead pages_used and perc_pages_used. + + | **returned**: success + | **type**: int + | **sample**: 2 + + members + Number of members inside a partitioned data set. + + | **returned**: success + | **type**: int + | **sample**: 3 + + pages_allocated + Number of pages allocated to a PDSE. + + | **returned**: success + | **type**: int + | **sample**: 1116 + + pages_used + Number of pages used by a PDSE. The pages are 4K in size. + + | **returned**: success + | **type**: int + | **sample**: 5 + + perc_pages_used + Percentage of pages used by a PDSE. + + Gets rounded down to the nearest integer value. + + | **returned**: success + | **type**: int + | **sample**: 10 + + pdse_version + PDSE data set version. + + | **returned**: success + | **type**: int + | **sample**: 1 + + max_pdse_generation + Maximum number of generations of a member that can be maintained in a PDSE. + + | **returned**: success + | **type**: int + + seq_type + Type of sequential data set (when it applies). + + Value can be either one of 'basic', 'large' or 'extended'. + + | **returned**: success + | **type**: str + | **sample**: basic + + data + Dictionary containing attributes for the DATA component of a VSAM. + + For the rest of the attributes of this data set, query it directly with this module. + + | **returned**: success + | **type**: dict + + key_length + Key length for data records, in bytes. + + | **returned**: success + | **type**: int + | **sample**: 4 + + key_offset + Key offset for data records. 
+ + | **returned**: success + | **type**: int + | **sample**: 3 + + max_record_length + Maximum length of data records, in bytes. + + | **returned**: success + | **type**: int + | **sample**: 80 + + avg_record_length + Average length of data records, in bytes. + + | **returned**: success + | **type**: int + | **sample**: 80 + + bufspace + Minimum buffer space in bytes to be provided by a processing program. + + | **returned**: success + | **type**: int + | **sample**: 37376 + + total_records + Total number of records. + + | **returned**: success + | **type**: int + | **sample**: 50 + + spanned + Whether the data set allows records to be spanned across control intervals. + + | **returned**: success + | **type**: bool + + volser + Name of the volume containing the DATA component. + + | **returned**: success + | **type**: str + | **sample**: 000000 + + device_type + Generic device type where the DATA component resides. + + | **returned**: success + | **type**: str + | **sample**: 3390 + + + index + Dictionary containing attributes for the INDEX component of a VSAM. + + For the rest of the attributes of this data set, query it directly with this module. + + | **returned**: success + | **type**: dict + + key_length + Key length for index records, in bytes. + + | **returned**: success + | **type**: int + | **sample**: 4 + + key_offset + Key offset for index records. + + | **returned**: success + | **type**: int + | **sample**: 3 + + max_record_length + Maximum length of index records, in bytes. + + | **returned**: success + | **type**: int + + avg_record_length + Average length of index records, in bytes. + + | **returned**: success + | **type**: int + | **sample**: 505 + + bufspace + Minimum buffer space in bytes to be provided by a processing program. + + | **returned**: success + | **type**: int + + total_records + Total number of records. + + | **returned**: success + | **type**: int + + volser + Name of the volume containing the INDEX component. + + | **returned**: success + | **type**: str + | **sample**: 000000 + + device_type + Generic device type where the INDEX component resides. + + | **returned**: success + | **type**: str + | **sample**: 3390 + + + limit + Maximum amount of active generations allowed in a GDG. + + | **returned**: success + | **type**: int + | **sample**: 10 + + scratch + Whether the GDG has the SCRATCH attribute set. + + | **returned**: success + | **type**: bool + + empty + Whether the GDG has the EMPTY attribute set. + + | **returned**: success + | **type**: bool + + order + Allocation order of new Generation Data Sets for a GDG. + + Value can be either 'lifo' or 'fifo'. + + | **returned**: success + | **type**: str + | **sample**: lifo + + purge + Whether the GDG has the PURGE attribute set. + + | **returned**: success + | **type**: bool + + extended + Whether the GDG has the EXTENDED attribute set. + + | **returned**: success + | **type**: bool + + active_gens + List of the names of the currently active generations of a GDG. + + | **returned**: success + | **type**: list + | **elements**: str + | **sample**: + + .. code-block:: json + + [ + "USER.GDG.G0001V00", + "USER.GDG.G0002V00" + ] + + auditfid + File system identification string for an aggregate. + + | **returned**: success + | **type**: str + | **sample**: C3C6C3F0 F0F3000E 0000 + + bitmap_file_size + Size in K of an aggregate's bitmap file. + + | **returned**: success + | **type**: int + | **sample**: 8 + + converttov5 + Value of the converttov5 flag of an aggregate. 
+ + | **returned**: success + | **type**: bool + + filesystem_table_size + Size in K of an aggregate's filesystem table. + + | **returned**: success + | **type**: int + | **sample**: 16 + + free + Kilobytes still free in an aggregate. + + | **returned**: success + | **type**: int + | **sample**: 559 + + free_1k_fragments + Number of free 1-KB fragments in an aggregate. + + | **returned**: success + | **type**: int + | **sample**: 7 + + free_8k_blocks + Number of free 8-KB blocks in an aggregate. + + | **returned**: success + | **type**: int + | **sample**: 69 + + log_file_size + Size in K of an aggregate's log file. + + | **returned**: success + | **type**: int + | **sample**: 112 + + sysplex_aware + Value of the sysplex_aware flag of an aggregate. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + total_size + Total K available in an aggregate. + + | **returned**: success + | **type**: int + | **sample**: 648000 + + version + Version of an aggregate. + + | **returned**: success + | **type**: str + | **sample**: 1.5 + + quiesced + Attributes available when an aggregate has been quiesced. + + | **returned**: success + | **type**: dict + + job + Name of the job that quiesced the aggregate. + + | **returned**: success + | **type**: str + | **sample**: USERJOB + + system + Name of the system that quiesced the aggregate. + + | **returned**: success + | **type**: str + | **sample**: GENSYS + + timestamp + Timestamp of the quiesce operation. + + | **returned**: success + | **type**: str + | **sample**: 2025-02-01T18:02:05 + + + mode + Octal representation of a file's permissions. + + | **returned**: success + | **type**: str + | **sample**: 0755 + + atime + Time of last access for a file. + + | **returned**: success + | **type**: str + | **sample**: 2025-02-23T13:03:45 + + mtime + Time of last modification of a file. + + | **returned**: success + | **type**: str + | **sample**: 2025-02-23T13:03:45 + + ctime + Time of last metadata update or creation for a file. + + | **returned**: success + | **type**: str + | **sample**: 2025-02-23T13:03:45 + + checksum + Checksum of the file computed by the hashing algorithm specified in ``checksum_algorithm``. + + Will be null if ``get_checksum=false``. + + | **returned**: success + | **type**: str + | **sample**: 2025-02-23T13:03:45 + + uid + ID of the file's owner. + + | **returned**: success + | **type**: int + + gid + ID of the file's group. + + | **returned**: success + | **type**: int + | **sample**: 1 + + size + Size of the file in bytes. + + | **returned**: success + | **type**: int + | **sample**: 9840 + + inode + Inode number of the path. + + | **returned**: success + | **type**: int + | **sample**: 1671 + + dev + Device the inode resides on. + + | **returned**: success + | **type**: int + | **sample**: 1 + + nlink + Number of links to the inode. + + | **returned**: success + | **type**: int + | **sample**: 1 + + isdir + Whether the path is a directory. + + | **returned**: success + | **type**: bool + + ischr + Whether the path is a character device. + + | **returned**: success + | **type**: bool + + isblk + Whether the path is a block device. + + | **returned**: success + | **type**: bool + + isreg + Whether the path is a regular file. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + isfifo + Whether the path is a named pipe. + + | **returned**: success + | **type**: bool + + islnk + Whether the file is a symbolic link. 
+ + | **returned**: success + | **type**: bool + + issock + Whether the file is a Unix domain socket. + + | **returned**: success + | **type**: bool + + isuid + Whether the Ansible user's ID matches the owner's ID. + + | **returned**: success + | **type**: bool + + isgid + Whether the Ansible user's group matches the owner's group. + + | **returned**: success + | **type**: bool + + wusr + Whether the file's owner has write permission. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + rusr + Whether the file's owner has read permission. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + xusr + Whether the file's owner has execute permission. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + wgrp + Whether the file's group has write permission. + + | **returned**: success + | **type**: bool + + rgrp + Whether the file's group has read permission. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + xgrp + Whether the file's group has execute permission. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + woth + Whether others have write permission over the file. + + | **returned**: success + | **type**: bool + + roth + Whether others have read permission over the file. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + xoth + Whether others have execute permission over the file. + + | **returned**: success + | **type**: bool + + writeable + Whether the Ansible user can write to the path. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + readable + Whether the Ansible user can read the path. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + executable + Whether the Ansible user can execute the path. + + | **returned**: success + | **type**: bool + | **sample**: + + .. code-block:: json + + true + + pw_name + User name of the file's owner. + + | **returned**: success + | **type**: str + | **sample**: username + + gr_name + Group name of the file's owner. + + | **returned**: success + | **type**: str + | **sample**: group + + lnk_source + Absolute path to the target of a symlink. + + | **returned**: success + | **type**: str + | **sample**: /etc/foobar/file + + lnk_target + Target of a symlink. + + Preserves relative paths. + + | **returned**: success + | **type**: str + | **sample**: ../foobar/file + + charset + Current encoding tag associated with the file. + + This tag does not necessarily correspond with the actual encoding of the file. + + | **returned**: success + | **type**: str + | **sample**: IBM-1047 + + mimetype + Output from the file utility describing the content. + + Will be null if ``get_mime=false``. + + | **returned**: success + | **type**: str + | **sample**: commands text + + audit_bits + Audit bits for the file. Contains two sets of 3 bits. + + First 3 bits describe the user-requested audit information. + + Last 3 bits describe the auditor-requested audit information. + + For each set, the bits represent read, write and execute/search audit options. + + An 's' means to audit successful access attempts. + + An 'f' means to audit failed access attempts. + + An 'a' means to audit all access attempts. + + An '-' means to not audit accesses. 
+ + | **returned**: success + | **type**: str + | **sample**: fff--- + + file_format + File format (for regular files). One of "null", "bin" or "rec". + + Text data delimiter for a file. One of "nl", "cr", "lf", "crlf", "lfcr" or "crnl". + + | **returned**: success + | **type**: str + | **sample**: bin + + + diff --git a/docs/source/modules/zos_tso_command.rst b/docs/source/modules/zos_tso_command.rst new file mode 100644 index 0000000000..576ecefa1b --- /dev/null +++ b/docs/source/modules/zos_tso_command.rst @@ -0,0 +1,171 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_tso_command.py + +.. _zos_tso_command_module: + + +zos_tso_command -- Execute TSO commands +======================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- Execute TSO commands on the target z/OS system with the provided options and receive a structured response. + + + + + +Parameters +---------- + + +commands + One or more TSO commands to execute on the target z/OS system. + + Accepts a single string or list of strings as input. + + If a list of strings is provided, processing will stop at the first failure, based on rc. + + | **required**: True + | **type**: raw + + +max_rc + Specifies the maximum return code allowed for a TSO command. + + If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. + + | **required**: False + | **type**: int + | **default**: 0 + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: full + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Execute TSO commands to allocate a new dataset. + zos_tso_command: + commands: + - alloc da('TEST.HILL3.TEST') like('TEST.HILL3') + - delete 'TEST.HILL3.TEST' + + - name: Execute TSO command List User (LU) for TESTUSER to obtain TSO information. + zos_tso_command: + commands: + - LU TESTUSER + + - name: Execute TSO command List Dataset (LISTDSD) and allow for maximum return code of 4. + zos_tso_command: + commands: + - LISTDSD DATASET('HLQ.DATA.SET') ALL GENERIC + max_rc: 4 + + - name: Execute TSO command to run a REXX script explicitly from a data set. + zos_tso_command: + commands: + - EXEC HLQ.DATASET.REXX exec + + - name: Chain multiple TSO commands into one invocation using semicolons. + zos_tso_command: + commands: >- + ALLOCATE DDNAME(IN1) DSNAME('HLQ.PDSE.DATA.SRC(INPUT)') SHR; + ALLOCATE DDNAME(OUT1) DSNAME('HLQ.PDSE.DATA.DEST(OUTPUT)') SHR; + OCOPY INDD(IN1) OUTDD(OUT1) BINARY; + + - name: Recall a migrated data set. + zos_tso_command: + commands: + - HRECALL 'MY.DATASET' WAIT + + + + + + + + + + +Return Values +------------- + + +output + List of each TSO command output. + + | **returned**: always + | **type**: list + | **elements**: dict + + command + The executed TSO command. + + | **returned**: always + | **type**: str + + rc + The return code from the executed TSO command. + + | **returned**: always + | **type**: int + + max_rc + Specifies the maximum return code allowed for a TSO command. + + If more than one TSO command is submitted, the *max_rc* applies to all TSO commands. 
+ + | **returned**: always + | **type**: int + + content + The response resulting from the execution of the TSO command. + + | **returned**: always + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "NO MODEL DATA SET OMVSADM", + "TERMUACC ", + "SUBGROUP(S)= VSAMDSET SYSCTLG BATCH SASS MASS IMSGRP1 ", + " IMSGRP2 IMSGRP3 DSNCAT DSN120 J42 M63 ", + " J91 J09 J97 J93 M82 D67 ", + " D52 M12 CCG D17 M32 IMSVS ", + " DSN210 DSN130 RAD CATLG4 VCAT CSP " + ] + + lines + The line number of the content. + + | **returned**: always + | **type**: int + + diff --git a/docs/source/modules/zos_unarchive.rst b/docs/source/modules/zos_unarchive.rst new file mode 100644 index 0000000000..eec87c3eca --- /dev/null +++ b/docs/source/modules/zos_unarchive.rst @@ -0,0 +1,555 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_unarchive.py + +.. _zos_unarchive_module: + + +zos_unarchive -- Unarchive files and data sets in z/OS. +======================================================= + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The ``zos_unarchive`` module unpacks an archive after optionally transferring it to the remote system. +- For supported archive formats, see option ``format``. +- Supported sources are USS (UNIX System Services) or z/OS data sets. +- Mixing MVS data sets with USS files for unarchiving is not supported. +- The archive is sent to the remote as binary, so no encoding is performed. + + + + + +Parameters +---------- + + +src + The remote absolute path or data set of the archive to be uncompressed. + + *src* can be a USS file or MVS data set name. + + USS file paths should be absolute paths. + + MVS data sets supported types are ``SEQ``, ``PDS``, ``PDSE``. + + GDS relative names are supported. e.g. *USER.GDG(-1*). + + | **required**: True + | **type**: str + + +format + The compression type and corresponding options to use when archiving data. + + | **required**: True + | **type**: dict + + + name + The compression format used while archiving. + + | **required**: True + | **type**: str + | **choices**: bz2, gz, tar, zip, terse, xmit, pax + + + format_options + Options specific to a compression format. + + | **required**: False + | **type**: dict + + + xmit_log_data_set + Provide the name of a data set to store xmit log output. + + If the data set provided does not exist, the program will create it. + + If the data set provided exists, the data set must have the following attributes: LRECL=255, BLKSIZE=3120, and RECFM=VB + + When providing the *xmit_log_data_set* name, ensure there is adequate space. + + | **required**: False + | **type**: str + + + use_adrdssu + If set to true, the ``zos_unarchive`` module will use Data Facility Storage Management Subsystem data set services (DFSMSdss) program ADRDSSU to uncompress data sets from a portable format after using ``xmit`` or ``terse``. + + | **required**: False + | **type**: bool + | **default**: False + + + dest_volumes + When *use_adrdssu=True*, specify the volume the data sets will be written to. + + If no volume is specified, storage management rules will be used to determine the volume where the file will be unarchived. + + If the storage administrator has specified a system default unit name and you do not set a volume name for non-system-managed data sets, then the system uses the volumes associated with the default unit name. Check with your storage administrator to determine whether a default unit name has been specified. 
+
+      | **required**: False
+      | **type**: list
+      | **elements**: str
+
+
+
+dest
+  The remote absolute path or data set where the content should be unarchived to.
+
+  *dest* can be a USS file, directory or MVS data set name.
+
+  If *dest* has missing parent directories, they will not be created.
+
+  | **required**: False
+  | **type**: str
+
+
+group
+  Name of the group that will own the file system objects.
+
+  When left unspecified, it uses the current group of the current user unless you are root, in which case it can preserve the previous ownership.
+
+  This option is only applicable if ``dest`` is USS, otherwise ignored.
+
+  | **required**: False
+  | **type**: str
+
+
+mode
+  The permission of the uncompressed files.
+
+  If ``dest`` is USS, this will act as Unix file mode, otherwise ignored.
+
+  It should be noted that modes are octal numbers. The user must either add a leading zero so that Ansible's YAML parser knows it is an octal number (like ``0644`` or ``01777``) or quote it (like ``'644'`` or ``'1777'``) so Ansible receives a string and can do its own conversion from string into number. Giving Ansible a number without following one of these rules will end up with a decimal number which will have unexpected results.
+
+  The mode may also be specified as a symbolic mode (for example, ``u+rwx`` or ``u=rw,g=r,o=r``) or a special string ``preserve``.
+
+  *mode=preserve* means that the file will be given the same permissions as the source file.
+
+  | **required**: False
+  | **type**: str
+
+
+owner
+  Name of the user that should own the filesystem object, as would be passed to the chown command.
+
+  When left unspecified, it uses the current user unless you are root, in which case it can preserve the previous ownership.
+
+  | **required**: False
+  | **type**: str
+
+
+include
+  A list of directories, files or data set names to extract from the archive.
+
+  GDS relative names are supported, e.g. *USER.GDG(-1)*.
+
+  When ``include`` is set, only those files will be extracted, leaving the remaining files in the archive.
+
+  Mutually exclusive with exclude.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+
+
+exclude
+  List the directory and file or data set names that you would like to exclude from the unarchive action.
+
+  GDS relative names are supported, e.g. *USER.GDG(-1)*.
+
+  Mutually exclusive with include.
+
+  | **required**: False
+  | **type**: list
+  | **elements**: str
+
+
+list
+  Will list the contents of the archive without unpacking.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+dest_data_set
+  Data set attributes to customize a ``dest`` data set that the archive will be copied into.
+
+  | **required**: False
+  | **type**: dict
+
+
+  name
+    Desired name for the destination data set.
+
+    | **required**: False
+    | **type**: str
+
+
+  type
+    Organization of the destination.
+
+    | **required**: False
+    | **type**: str
+    | **default**: seq
+    | **choices**: seq, pds, pdse
+
+
+  space_primary
+    If the destination *dest* data set does not exist, this sets the primary space allocated for the data set.
+
+    The unit of space used is set using *space_type*.
+
+    | **required**: False
+    | **type**: int
+
+
+  space_secondary
+    If the destination *dest* data set does not exist, this sets the secondary space allocated for the data set.
+
+    The unit of space used is set using *space_type*.
+
+    | **required**: False
+    | **type**: int
+
+
+  space_type
+    If the destination data set does not exist, this sets the unit of measurement to use when defining primary and secondary space.
+
+    Valid units of size are ``k``, ``m``, ``g``, ``cyl``, and ``trk``.
+
+    | **required**: False
+    | **type**: str
+    | **choices**: k, m, g, cyl, trk
+
+
+  record_format
+    If the destination data set does not exist, this sets the format of the data set. (e.g. ``fb``)
+
+    Choices are case-sensitive.
+
+    | **required**: False
+    | **type**: str
+    | **choices**: fb, vb, fba, vba, u
+
+
+  record_length
+    The length of each record in the data set, in bytes.
+
+    For variable data sets, the length must include the 4-byte prefix area.
+
+    Defaults vary depending on format: if FB/FBA, 80; if VB/VBA, 137; if U, 0.
+
+    | **required**: False
+    | **type**: int
+
+
+  block_size
+    The block size to use for the data set.
+
+    | **required**: False
+    | **type**: int
+
+
+  directory_blocks
+    The number of directory blocks to allocate to the data set.
+
+    | **required**: False
+    | **type**: int
+
+
+  key_offset
+    The key offset to use when creating a KSDS data set.
+
+    *key_offset* is required when *type=ksds*.
+
+    *key_offset* should only be provided when *type=ksds*.
+
+    | **required**: False
+    | **type**: int
+
+
+  key_length
+    The key length to use when creating a KSDS data set.
+
+    *key_length* is required when *type=ksds*.
+
+    *key_length* should only be provided when *type=ksds*.
+
+    | **required**: False
+    | **type**: int
+
+
+  sms_storage_class
+    The storage class for an SMS-managed dataset.
+
+    Required for SMS-managed datasets that do not match an SMS rule.
+
+    Not valid for datasets that are not SMS-managed.
+
+    Note that all non-linear VSAM datasets are SMS-managed.
+
+    | **required**: False
+    | **type**: str
+
+
+  sms_data_class
+    The data class for an SMS-managed dataset.
+
+    Optional for SMS-managed datasets that do not match an SMS rule.
+
+    Not valid for datasets that are not SMS-managed.
+
+    Note that all non-linear VSAM datasets are SMS-managed.
+
+    | **required**: False
+    | **type**: str
+
+
+  sms_management_class
+    The management class for an SMS-managed dataset.
+
+    Optional for SMS-managed datasets that do not match an SMS rule.
+
+    Not valid for datasets that are not SMS-managed.
+
+    Note that all non-linear VSAM datasets are SMS-managed.
+
+    | **required**: False
+    | **type**: str
+
+
+
+tmp_hlq
+  Override the default high level qualifier (HLQ) for temporary data sets.
+
+  The default HLQ is the Ansible user used to execute the module and if that is not available, then the environment variable value ``TMPHLQ`` is used.
+
+  | **required**: False
+  | **type**: str
+
+
+force
+  If set to true and the remote file or data set *dest* exists, the *dest* will be deleted.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+remote_src
+  If set to true, ``zos_unarchive`` retrieves the archive from the remote system.
+
+  If set to false, ``zos_unarchive`` searches the local machine (Ansible controller) for the archive.
+
+  | **required**: False
+  | **type**: bool
+  | **default**: False
+
+
+encoding
+  Specifies the character encoding conversion to be applied to the destination files after unarchiving.
+
+  Supported character sets rely on the version of the charset conversion utility ``iconv``; the most common character sets are supported.
+
+  After conversion, the files are stored in the same location they were unarchived to, under their original names.
+
+  No backup of the original unconverted files is kept; to recover them, run the unarchive again on the same source archive without the encoding parameters.
+
+  Destination files will be converted to the new encoding and will not be restored to their original encoding.
+
+  If encoding fails for any file in a set of multiple files, that file is skipped and its name is returned; the task still completes successfully with ``rc=0``.
+
+  Encoding does not check whether a file is already present; it operates on the files that were successfully unarchived.
+
+  | **required**: False
+  | **type**: dict
+
+
+  from
+    The character set of the source *src*.
+
+    | **required**: False
+    | **type**: str
+
+
+  to
+    The destination *dest* character set for the files to be written as.
+
+    | **required**: False
+    | **type**: str
+
+
+  skip_encoding
+    List of names to skip encoding after unarchiving. This is only used if *encoding* is set, otherwise it is ignored.
+
+    | **required**: False
+    | **type**: list
+    | **elements**: str
+
+
+
+
+
+Attributes
+----------
+action
+  | **support**: full
+  | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+async
+  | **support**: full
+  | **description**: Supports being used with the ``async`` keyword.
+check_mode
+  | **support**: full
+  | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped.
+
+
+
+Examples
+--------
+
.. code-block:: yaml+jinja
+
+
+   # Simple extract
+   - name: Copy local tar file and unpack it on the managed z/OS node.
+     zos_unarchive:
+       src: "./files/archive_folder_test.tar"
+       format:
+         name: tar
+
+   # Use include
+   - name: Unarchive a bzip file selecting only a file to unpack.
+     zos_unarchive:
+       src: "/tmp/test.bz2"
+       format:
+         name: bz2
+       include:
+         - 'foo.txt'
+
+   # Use exclude
+   - name: Unarchive a terse data set, excluding data sets from unpacking.
+     zos_unarchive:
+       src: "USER.ARCHIVE.RESULT.TRS"
+       format:
+         name: terse
+       exclude:
+         - USER.ARCHIVE.TEST1
+         - USER.ARCHIVE.TEST2
+
+   # Unarchive a GDS
+   - name: Unarchive a terse archive from a generation data set.
+     zos_unarchive:
+       src: "USER.ARCHIVE(0)"
+       format:
+         name: terse
+
+   # List option
+   - name: List contents from an XMIT archive without unpacking.
+     zos_unarchive:
+       src: "USER.ARCHIVE.RESULT.XMIT"
+       format:
+         name: xmit
+         format_options:
+           use_adrdssu: true
+       list: true
+
+   # Encoding example
+   - name: Encode the destination data set into Latin-1 after unarchiving.
+     zos_unarchive:
+       src: "USER.ARCHIVE.RESULT.TRS"
+       format:
+         name: terse
+       encoding:
+         from: IBM-1047
+         to: ISO8859-1
+
+   - name: Encode the destination data set into Latin-1 after unarchiving, skipping selected targets.
+     zos_unarchive:
+       src: "USER.ARCHIVE.RESULT.TRS"
+       format:
+         name: terse
+       encoding:
+         from: IBM-1047
+         to: ISO8859-1
+         skip_encoding:
+           - USER.ARCHIVE.TEST1
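+
+   # The following example is a sketch added for illustration and is not part
+   # of the original documentation. It combines the documented 'use_adrdssu'
+   # and 'dest_volumes' options; the volume serials here are assumptions.
+   - name: Unarchive a terse data set packed with ADRDSSU onto specific volumes.
+     zos_unarchive:
+       src: "USER.ARCHIVE.RESULT.TRS"
+       format:
+         name: terse
+         format_options:
+           use_adrdssu: true
+           dest_volumes:
+             - "000000"
+             - "222222"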
+
+
+
+
+Notes
+-----
+
+.. note::
+   VSAMs are not supported.
+
+   This module uses `zos_copy <./zos_copy.html>`_ to copy local scripts to the remote machine which uses SFTP (Secure File Transfer Protocol) for the underlying transfer protocol; SCP (secure copy protocol) and Co:Z SFTP are not supported. In the case of Co:Z SFTP, you can exempt the Ansible user id on z/OS from using Co:Z thus falling back to using standard SFTP. If the module detects SCP, it will temporarily use SFTP for transfers, if not available, the module will fail.
+
+
+
+See Also
+--------
+
+.. seealso::
+
+   - :ref:`zos_archive_module`
+
+
+
+
+Return Values
+-------------
+
+
+src
+  File path or data set name unpacked.
+
+  | **returned**: always
+  | **type**: str
+
+dest_path
+  Destination path where the archive was unpacked.
+
+  | **returned**: always
+  | **type**: str
+
+targets
+  List of files or data sets in the archive.
+
+  | **returned**: success
+  | **type**: list
+  | **elements**: str
+
+missing
+  Any files or data sets not found during extraction.
+
+  | **returned**: success
+  | **type**: str
+
+encoded
+  List of files or data sets that were successfully encoded.
+
+  | **returned**: success
+  | **type**: list
+
+failed_on_encoding
+  List of files or data sets that failed encoding.
+
+  | **returned**: success
+  | **type**: list
+
+skipped_encoding_targets
+  List of files or data sets that were skipped while encoding.
+
+  | **returned**: success
+  | **type**: list
+
diff --git a/docs/source/modules/zos_volume_init.rst b/docs/source/modules/zos_volume_init.rst
new file mode 100644
index 0000000000..68027cc310
--- /dev/null
+++ b/docs/source/modules/zos_volume_init.rst
@@ -0,0 +1,271 @@
+
+:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_volume_init.py
+
+.. _zos_volume_init_module:
+
+
+zos_volume_init -- Initialize volumes or minidisks.
+===================================================
+
+
+
+.. contents::
+   :local:
+   :depth: 1
+
+
+Synopsis
+--------
+- Initialize a volume or minidisk on z/OS.
+- *zos_volume_init* will create the volume label and entry into the volume table of contents (VTOC).
+- Volumes are used for storing data and executable programs.
+- A minidisk is a portion of a disk that is linked to your virtual machine.
+- A VTOC lists the data sets that reside on a volume, their location, size, and other attributes.
+- *zos_volume_init* uses the ICKDSF command INIT to initialize a volume. In some cases the command could be protected by facility class `STGADMIN.ICK.INIT`. Protection occurs when the class is active, and the class profile is defined. Ensure the user executing the Ansible task is permitted to execute the ICKDSF command INIT; otherwise, any user can use the command.
+- ICKDSF is an Authorized Program Facility (APF) program on z/OS; *zos_volume_init* will run in authorized mode, but if the program ICKDSF is not APF authorized, the task will end.
+- Note that defaults set on target z/OS systems may override ICKDSF parameters.
+- It is recommended that data on the volume is backed up, as the *zos_volume_init* module will not perform any backups. You can use the `zos_backup_restore <./zos_backup_restore.html>`_ module to back up a volume.
+
+
+
+
+
+Parameters
+----------
+
+
+address
+  *address* is a 3 or 4 digit hexadecimal number that specifies the address of the volume or minidisk.
+
+  *address* can be the number assigned to the device (device number) when it is installed or the virtual address.
+
+  | **required**: True
+  | **type**: str
+
+
+verify_volid
+  Verify that the volume serial matches what is on the existing volume or minidisk.
+
+  *verify_volid* must be 1 to 6 alphanumeric characters or ``*NONE*``.
+
+  To verify that a volume serial number does not exist, use *verify_volid=*NONE**.
+
+  If *verify_volid* is specified and the volume serial number does not match that found on the volume or minidisk, initialization does not complete.
+
+  If *verify_volid=*NONE** is specified and a volume serial is found on the volume or minidisk, initialization does not complete.
+ + Note, this option is **not** a boolean; leave it blank to skip the verification. + + | **required**: False + | **type**: str + + +verify_offline + Verify that the device is not online to any other systems; if the device is online to another system, initialization does not complete. + + | **required**: False + | **type**: bool + | **default**: True + + +volid + The volume serial number used to initialize a volume or minidisk. + + Expects 1-6 alphanumeric, national ($,#,@) or special characters. + + A *volid* with fewer than 6 characters will be padded with spaces. + + A *volid* can also be referred to as volser or volume serial number. + + When *volid* is not specified for a previously initialized volume or minidisk, the volume serial number will remain unchanged. + + | **required**: False + | **type**: str + + +vtoc_size + The number of tracks to initialize the volume table of contents (VTOC) with. + + The VTOC will be placed in cylinder 0 head 1. + + If no tracks are specified, it will default to the number of tracks in a cylinder minus 1. Tracks in a cylinder vary based on direct-access storage device (DASD) models; for 3390, a cylinder is 15 tracks. + + | **required**: False + | **type**: int + + +index + Create a volume table of contents (VTOC) index. + + The VTOC index enhances the performance of VTOC access. + + When set to *false*, no index will be created. + + | **required**: False + | **type**: bool + | **default**: True + + +sms_managed + Specifies that the volume be managed by Storage Management System (SMS). + + If *sms_managed* is *true* then *index* must also be *true*. + + | **required**: False + | **type**: bool + | **default**: True + + +verify_volume_empty + Verify that no data sets other than the volume table of contents (VTOC) index or the VSAM Volume Data Set (VVDS) exist on the target volume. + + | **required**: False + | **type**: bool + | **default**: True + + +tmp_hlq + Override the default high level qualifier (HLQ) for temporary and backup data sets. + + The default HLQ is the Ansible user used to execute the module, and if that is not available, the value ``TMPHLQ`` is used. + + | **required**: False + | **type**: str + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Initialize target volume with all default options. Target volume address is '1234', set volume name to 'DEMO01'. + Target volume is checked to ensure it is offline and contains no data sets. Volume is SMS managed, has an index + and VTOC size defined by the system. + zos_volume_init: + address: "1234" + volid: "DEMO01" + + - name: Initialize target volume with all default options and additionally check the existing volid + matches the given value 'DEMO02' before re-initializing the volume and renaming it to 'DEMO01'. + zos_volume_init: + address: "1234" + volid: "DEMO01" + verify_volid: "DEMO02" + + - name: Initialize non-SMS managed target volume with all the default options. 
+ zos_volume_init: + address: "1234" + volid: "DEMO01" + sms_managed: false + + - name: Initialize non-SMS managed target volume with all the default options and + override the default high level qualifier (HLQ). + zos_volume_init: + address: 1234 + volid: DEMO01 + sms_managed: false + tmp_hlq: TESTUSR + + - name: Initialize a new SMS managed DASD volume with new volume serial 'e8d8' with a 30 track VTOC and an index, as long as + the existing volume serial is 'ine8d8' and there are no pre-existing data sets on the target. The check to see + if the volume is online before initialization is skipped. + zos_volume_init: + address: e8d8 + vtoc_size: 30 + index: true + sms_managed: true + volid: ine8d8 + verify_volid: ine8d8 + verify_volume_empty: true + verify_offline: false + + - name: Initialize 3 new DASD volumes (0901, 0902, 0903) for use on a z/OS system as 'DEMO01', 'DEMO02', 'DEMO03' + using Ansible loops. + zos_volume_init: + address: "090{{ item }}" + volid: "DEMO0{{ item }}" + loop: "{{ range(1, 4, 1) }}" + + + + + + +See Also +-------- + +.. seealso:: + + - :ref:`zos_backup_restore_module` + + + + +Return Values +------------- + + +msg + Failure message returned by module. + + | **returned**: failure + | **type**: str + | **sample**: 'Index' cannot be False for SMS managed volumes. + +rc + Return code from ICKDSF init command. + + | **returned**: when ICKDSF program is run. + | **type**: dict + +content + Raw output from ICKDSF. + + | **returned**: when ICKDSF program is run. + | **type**: list + | **elements**: str + | **sample**: + + .. code-block:: json + + [ + "1ICKDSF - MVS/ESA DEVICE SUPPORT FACILITIES 17.0 TIME: 18:32:22 01/17/23 PAGE 1", + "0 ", + "0 INIT UNIT(0903) NOVERIFY NOVERIFYOFFLINE VOLID(KET678) -", + "0 NODS NOINDEX", + "-ICK00700I DEVICE INFORMATION FOR 0903 IS CURRENTLY AS FOLLOWS:", + "- PHYSICAL DEVICE = 3390", + "- STORAGE CONTROLLER = 2107", + "- STORAGE CONTROL DESCRIPTOR = E8", + "- DEVICE DESCRIPTOR = 0C", + "- ADDITIONAL DEVICE INFORMATION = 4A00003C", + "- TRKS/CYL = 15, # PRIMARY CYLS = 100", + "0ICK04000I DEVICE IS IN SIMPLEX STATE", + "0ICK00703I DEVICE IS OPERATED AS A MINIDISK", + " ICK00091I 0903 NED=002107.900.IBM.75.0000000BBA01", + "-ICK03091I EXISTING VOLUME SERIAL READ = KET987", + "-ICK03096I EXISTING VTOC IS LOCATED AT CCHH=X\u00270000 0001\u0027 AND IS 14 TRACKS.", + "0ICK01314I VTOC IS LOCATED AT CCHH=X\u00270000 0001\u0027 AND IS 14 TRACKS.", + "-ICK00001I FUNCTION COMPLETED, HIGHEST CONDITION CODE WAS 0", + "0 18:32:22 01/17/23", + "0 ", + "-ICK00002I ICKDSF PROCESSING COMPLETE. MAXIMUM CONDITION CODE WAS 0" + ] + diff --git a/docs/source/modules/zos_zfs_resize.rst b/docs/source/modules/zos_zfs_resize.rst new file mode 100644 index 0000000000..9946c5b71b --- /dev/null +++ b/docs/source/modules/zos_zfs_resize.rst @@ -0,0 +1,314 @@ + +:github_url: https://github.com/ansible-collections/ibm_zos_core/blob/dev/plugins/modules/zos_zfs_resize.py + +.. _zos_zfs_resize_module: + + +zos_zfs_resize -- Resize a zFS data set. +======================================== + + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The module *zos_zfs_resize* can resize a zFS aggregate data set. +- The *target* data set must be unique and a Fully Qualified Name (FQN) of a z/OS zFS aggregate data set. +- The data set must be attached as read-write. +- *size* must be provided. + + + + + +Parameters +---------- + + +target + The Fully Qualified Name of the zFS data set that will be resized. 
+ + | **required**: True + | **type**: str + + +size + The desired size of the data set after the resizing is performed. + + | **required**: True + | **type**: int + + +space_type + The unit of measurement to use when defining the size. + + Valid units are ``k`` (kilobytes), ``m`` (megabytes), ``g`` (gigabytes), ``cyl`` (cylinders), and ``trk`` (tracks). + + | **required**: False + | **type**: str + | **default**: k + | **choices**: k, m, g, cyl, trk + + +no_auto_increase + Controls whether the data set size will be automatically increased when performing a shrink operation. + + When set to ``true``, if more space is needed during the shrinking of the zFS aggregate, the total size will not be increased and the module will fail. + + | **required**: False + | **type**: bool + | **default**: False + + +verbose + Return diagnostic messages that describe the module's execution. + + Verbose output includes the standard out (stdout) of the module's execution, which can be excessive; to avoid writing this to stdout, you can optionally set ``trace_destination`` instead. + + | **required**: False + | **type**: bool + | **default**: False + + +trace_destination + Specify a unique USS file name or data set name for ``trace_destination``. + + If ``trace_destination`` is a USS file or path, it must be an absolute path name. + + Supported MVS data set types are PDS and PDS/E(MEMBER). + + If the destination is an MVS data set name, the ``trace_destination`` provided must meet data set naming conventions of one or more qualifiers, each from one to eight characters long, that are delimited by periods. + + Recommended characteristics for an MVS data set are a record length of 200, record format VB, and a primary space of 42000 kilobytes. + + | **required**: False + | **type**: str + + + + +Attributes +---------- +action + | **support**: none + | **description**: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. +async + | **support**: full + | **description**: Supports being used with the ``async`` keyword. +check_mode + | **support**: none + | **description**: Can run in check_mode and return changed status prediction without modifying target. If not supported, the action will be skipped. + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Resize an aggregate data set to 2500 kilobytes. + zos_zfs_resize: + target: TEST.ZFS.DATA + size: 2500 + + - name: Resize an aggregate data set to 20 tracks. + zos_zfs_resize: + target: TEST.ZFS.DATA + space_type: trk + size: 20 + + - name: Resize an aggregate data set to 4 megabytes. + zos_zfs_resize: + target: TEST.ZFS.DATA + space_type: m + size: 4 + + - name: Resize an aggregate data set to 1000 kilobytes and set no auto increase if it's shrinking. + zos_zfs_resize: + target: TEST.ZFS.DATA + size: 1000 + no_auto_increase: true + + - name: Resize an aggregate data set and get verbose output. + zos_zfs_resize: + target: TEST.ZFS.DATA + size: 2500 + verbose: true + + - name: Resize an aggregate data set and get the full trace on a file. + zos_zfs_resize: + target: TEST.ZFS.DATA + size: 2500 + trace_destination: /tmp/trace.txt + + - name: Resize an aggregate data set and capture the trace into a PDS member. + zos_zfs_resize: + target: TEST.ZFS.DATA + size: 2500 + trace_destination: "TEMP.HELPER.STORAGE(RESIZE)" + + - name: Resize an aggregate data set and capture the trace into a file with verbose output. 
+ zos_zfs_resize: + target: TEST.ZFS.DATA + size: 2500 + verbose: true + trace_destination: /tmp/trace.txt + + + + +Notes +----- + +.. note:: + If needed, allocate the zFS trace output data set as a PDSE with RECFM=VB, LRECL=133 with a primary allocation of at least 50 cylinders and a secondary allocation of 30 cylinders. + + See the zfsadm documentation for more details. + + + + + + + +Return Values +------------- + + +cmd + The zfsadm command executed on the remote node. + + | **returned**: always + | **type**: str + | **sample**: zfsadm grow -aggregate SOMEUSER.VVV.ZFS -size 4096 + +target + The Fully Qualified Name of the resized zFS data set. + + | **returned**: always + | **type**: str + | **sample**: SOMEUSER.VVV.ZFS + +mount_target + The original share/mount of the data set. + + | **returned**: always + | **type**: str + | **sample**: /tmp/zfs_agg + +size + The desired size from option ``size`` according to ``space_type``. The resulting size can vary slightly; the actual space utilization is returned in ``new_size``. + + | **returned**: always + | **type**: int + | **sample**: 4024 + +rc + The return code of the zfsadm command. + + | **returned**: always + | **type**: int + +old_size + The original data set size according to ``space_type`` before resizing was performed. + + | **returned**: always + | **type**: float + | **sample**: + + .. code-block:: json + + 3096 + +old_free_space + The original data set's free space according to ``space_type`` before resizing was performed. + + | **returned**: always + | **type**: float + | **sample**: + + .. code-block:: json + + 2.1 + +new_size + The data set size according to ``space_type`` after resizing was performed. + + | **returned**: success + | **type**: float + | **sample**: + + .. code-block:: json + + 4032 + +new_free_space + The data set's free space according to ``space_type`` after resizing was performed. + + | **returned**: success + | **type**: float + | **sample**: + + .. code-block:: json + + 1.5 + +space_type + The measurement unit of space used to report all size values. + + | **returned**: always + | **type**: str + | **sample**: k + +stdout + The module's standard out (stdout) that is returned. + + | **returned**: always + | **type**: str + | **sample**: IOEZ00173I Aggregate TEST.ZFS.DATA.USER successfully grown. + +stderr + The module's standard error (stderr) that is returned. It may have no value. + + | **returned**: always + | **type**: str + | **sample**: IOEZ00181E Could not open trace output dataset. + +stdout_lines + List of strings containing individual lines from standard out (stdout). + + | **returned**: always + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "IOEZ00173I Aggregate TEST.ZFS.DATA.USER successfully grown." + ] + +stderr_lines + List of strings containing individual lines from standard error (stderr). + + | **returned**: always + | **type**: list + | **sample**: + + .. code-block:: json + + [ + "IOEZ00181E Could not open trace output dataset." + ] + +verbose_output + If ``verbose=true``, this property contains the operation's full trace output. + + | **returned**: always + | **type**: str + | **sample**: 6FB2F8 print_trace_table printing contents of table Main Trace Table... + diff --git a/docs/source/roles/job_status.rst b/docs/source/roles/job_status.rst new file mode 100644 index 0000000000..42151c0fd3 --- /dev/null +++ b/docs/source/roles/job_status.rst @@ -0,0 +1,77 @@ + +:github_url: https://github.com/IBM/ibm_zosmf/tree/master/plugins/roles/job_status + +.. 
_job_status_module: + + +job_status -- Role that queries a job's status and whether it is running. +========================================================================= + + +.. contents:: + :local: + :depth: 1 + + +Synopsis +-------- +- The **IBM z/OS core collection** provides an Ansible role, referred to as **job_status**, that queries a particular job with a given job_id and parses the response to return, as a msg, the job status and whether the job is currently running. + + + + + + + +Variables +--------- + + + + +job_id + The job id that has been assigned to the job. + + A job id must begin with `STC`, `JOB`, or `TSU`, followed by up to 5 digits (see the validation sketch below). + + When a job id is greater than 99,999, the job id format will begin with `S`, `J`, or `T`, followed by 7 digits. + + | **required**: True + | **type**: str + + + + +Examples +-------- + +.. code-block:: yaml+jinja + + + - name: Query the status of job STC00001 and whether it is running + hosts: sampleHost + gather_facts: no + collections: + - ibm.ibm_zos_core + tasks: + - include_role: + name: job_status + vars: + job_id: STC00001 + + + + +Notes +----- + +.. note:: + - The role tolerates the asterisk (`*`) as a wildcard but only retrieves information from the first job returned that matches the pattern. + + + + + + + + From 9d183fd90da68b3e60bcc31575d3ec41e1817b81 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Tue, 11 Nov 2025 13:56:53 -0600 Subject: [PATCH 26/31] Added source --- docs/source/filters.rst | 8 ++++++++ docs/source/roles.rst | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/source/filters.rst b/docs/source/filters.rst index bbf24c6d41..a78680b044 100644 --- a/docs/source/filters.rst +++ b/docs/source/filters.rst @@ -27,7 +27,15 @@ the `filter`_ directory included in the collection. https://github.com/ansible-collections/ibm_zos_core/tree/main/plugins/filter/ +The **IBM z/OS core** collection provides many filters. +Reference material for each filter contains documentation on how to use certain +filters in your playbook. +.. toctree:: + :maxdepth: 1 + :glob: + + filters/* diff --git a/docs/source/roles.rst b/docs/source/roles.rst index b61b941232..b0cef42962 100644 --- a/docs/source/roles.rst +++ b/docs/source/roles.rst @@ -17,7 +17,7 @@ recommend migration actions between version 1 and version 2, collect diagnostic facts for support and debugging, and easily determine whether a job is currently running. -The **IBM z/OS core** provides many roles. +The **IBM z/OS core** collection provides many roles. Reference material for each role contains documentation on how to use certain roles in your playbook. 
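The job id format documented in the job_status role above implies a simple validation. A minimal sketch of that check in Python follows; the helper name and the exact regular expression are assumptions drawn from the prose in the role documentation, not code shipped by the role.

.. code-block:: python

    import re

    # Documented format: STC/JOB/TSU followed by up to 5 digits, or, for job
    # ids greater than 99,999, S/J/T followed by exactly 7 digits.
    JOB_ID_PATTERN = re.compile(r"(?:STC|JOB|TSU)\d{1,5}|[SJT]\d{7}")

    def is_valid_job_id(job_id):
        """Return True if job_id matches the documented z/OS job id format."""
        return bool(JOB_ID_PATTERN.fullmatch(job_id))

    for candidate in ("STC00001", "JOB12345", "S1234567", "TSU1", "XYZ123"):
        print(candidate, is_valid_job_id(candidate))

Note that the role also tolerates a trailing asterisk as a wildcard, which this sketch deliberately does not model.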
From 5785faceec0ee6055a18733431b3de62972093de Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Tue, 11 Nov 2025 14:07:50 -0600 Subject: [PATCH 27/31] Modified doc generation makefile --- docs/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index 4b7f3d687d..eaf87fad04 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -22,7 +22,7 @@ help: @echo "$(line_header)" @echo " make clean;make role-doc;make html;make view-html;" @echo " make clean;make module-doc;make html;make view-html;" - @echo " make clean;make module-doc;make role-doc;make html;make view-html;" + @echo " make clean;make filter-doc;make module-doc;make role-doc;make html;make view-html;" @echo " make clean;" @echo $(line_header) @@ -96,7 +96,7 @@ clean: mv -f ../plugins/modules/__init__.py.skip ../plugins/modules/__init__.py; \ fi - @echo "Completed cleanup, run 'make module-doc' or 'make role-doc'." + @echo "Completed cleanup, run 'make module-doc', 'make filter-doc' or 'make role-doc'." filter-doc: @echo $(line_header) From de45adf6f80e64d937e81e9bdeb92dedaeb766d9 Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Tue, 11 Nov 2025 14:10:38 -0600 Subject: [PATCH 28/31] Updated documentation --- ansible.cfg | 2 +- plugins/filter/generate_data_set_name.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index b452495ff1..90f6cac987 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,5 @@ ################################################################################ -# Copyright (c) IBM Corporation 2020, 2021 +# Copyright (c) IBM Corporation 2020, 2025 ################################################################################ ################################################################################ diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index 13aa56e29f..cdfc1590a4 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -17,28 +17,28 @@ name: generate_data_set_name author: Marcel Gutierrez (@AndreMarcel99) version_added: "2.0.0" -short_description: Filter returned valid data set names +short_description: Filter HLQs to generate a new random valid data set name. description: - Provide a valid temporary data set name. options: value: description: - - High level qualifier. + - High level qualifier to be used in the data set names. type: str required: true middle_level_qualifier: description: - - Possible valid middle level qualifier. + - Middle level qualifier to be used in the data set names. type: str required: false last_level_qualifier: description: - - Possible valid last level qualifier. + - Low level qualifier to be used in the data set names. type: str required: false num_names: description: - - Number of data set names that you require to generate. + - Number of data set names to be generated. type: int required: false ''' From 40924ee3f92b536433b28d636f6d6114aeb000bd Mon Sep 17 00:00:00 2001 From: Fernando Flores Date: Tue, 11 Nov 2025 14:11:39 -0600 Subject: [PATCH 29/31] Updated rst --- docs/source/filters/generate_data_set_name.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/source/filters/generate_data_set_name.rst b/docs/source/filters/generate_data_set_name.rst index f3d24ce5bf..b04361e4a1 100644 --- a/docs/source/filters/generate_data_set_name.rst +++ b/docs/source/filters/generate_data_set_name.rst @@ -4,8 +4,8 @@ .. 
_generate_data_set_name_module: -generate_data_set_name -- Filter returned valid data set names -============================================================== +generate_data_set_name -- Filter HLQs to generate a new random valid data set name. +=================================================================================== @@ -27,28 +27,28 @@ Parameters value - High level qualifier. + High level qualifier to be used in the data set names. | **required**: True | **type**: str middle_level_qualifier - Possible valid middle level qualifier. + Middle level qualifier to be used in the data set names. | **required**: False | **type**: str last_level_qualifier - Possible valid last level qualifier. + Low level qualifier to be used in the data set names. | **required**: False | **type**: str num_names - Number of data set names that you require to generate. + Number of data set names to be generated. | **required**: False | **type**: int From 2cbc0166669d9ea3d49875745565a2966c17a7a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Tue, 11 Nov 2025 14:30:08 -0600 Subject: [PATCH 30/31] Modify from last to low --- plugins/filter/generate_data_set_name.py | 35 ++++++++++--------- .../filters/test_generate_data_set_name.py | 10 +++--- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/plugins/filter/generate_data_set_name.py b/plugins/filter/generate_data_set_name.py index cdfc1590a4..35398ef8d6 100644 --- a/plugins/filter/generate_data_set_name.py +++ b/plugins/filter/generate_data_set_name.py @@ -26,12 +26,13 @@ - High level qualifier to be used in the data set names. type: str required: true + samples: USER middle_level_qualifier: description: - Middle level qualifier to be used in the data set names. type: str required: false - last_level_qualifier: + low_level_qualifier: description: - Low level qualifier to be used in the data set names. 
type: str @@ -52,21 +53,21 @@ set_fact: data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM') }}" -- name: Filter to generate a data set name with a specific last level qualifier +- name: Filter to generate a data set name with a specific low level qualifier set_fact: - data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier='LLQADM') }}" + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(low_level_qualifier='LLQADM') }}" -- name: Filter to generate a data set name with a specific middle and last level qualifier +- name: Filter to generate a data set name with a specific middle and low level qualifier set_fact: - data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', last_level_qualifier='LLQADM') }}" + data_set_name: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(middle_level_qualifier='MLQADM', low_level_qualifier='LLQADM') }}" - name: Filter to generate 10 data set names set_fact: data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(num_names=10) }}" -- name: Filter to generate 3 data set names with a specific last level qualifier +- name: Filter to generate 3 data set names with a specific low level qualifier set_fact: - data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(last_level_qualifier='LLQADM', num_names=3) }}" + data_set_names: "{{ hlq | ibm.ibm_zos_core.generate_data_set_name(low_level_qualifier='LLQADM', num_names=3) }}" - name: Filter to generate 5 data set names with a specific middle level qualifier set_fact: @@ -87,13 +88,13 @@ from ansible.errors import AnsibleFilterError -def generate_data_set_name(value, middle_level_qualifier="", last_level_qualifier="", num_names=1): +def generate_data_set_name(value, middle_level_qualifier="", low_level_qualifier="", num_names=1): """Filter to generate valid data set names Args: value {str} -- value of high level qualifier to use on data set names middle_level_qualifier {str,optional} -- str of a possible qualifier - last_level_qualifier {str, optional} -- str of a possible qualifier + low_level_qualifier {str, optional} -- str of a possible qualifier num_names {int, optional} -- number of dataset names to generate. Defaults to 1. Returns: @@ -109,26 +110,26 @@ def generate_data_set_name(value, middle_level_qualifier="", last_level_qualifie if bool(middle_level_qualifier): mlq = validate_qualifier(qualifier=middle_level_qualifier) - if bool(last_level_qualifier): - llq = validate_qualifier(qualifier=last_level_qualifier) + if bool(low_level_qualifier): + llq = validate_qualifier(qualifier=low_level_qualifier) if num_names > 1: dataset_names = [] for generation in range(num_names): - name = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, last_level_qualifier=llq) + name = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, low_level_qualifier=llq) dataset_names.append(name) else: - dataset_names = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, last_level_qualifier=llq) + dataset_names = hlq + get_tmp_ds_name(middle_level_qualifier=mlq, low_level_qualifier=llq) return dataset_names -def get_tmp_ds_name(middle_level_qualifier="", last_level_qualifier=""): +def get_tmp_ds_name(middle_level_qualifier="", low_level_qualifier=""): """Unify the random qualifiers generated into one name. 
Args: middle_level_qualifier {str,optional} -- valid str of a qualifier - last_level_qualifier {str, optional} -- valid str of a qualifier + low_level_qualifier {str, optional} -- valid str of a qualifier Returns: str: valid data set name @@ -142,8 +143,8 @@ def get_tmp_ds_name(middle_level_qualifier="", last_level_qualifier=""): ds += "C" + get_random_q() + "." - if bool(last_level_qualifier): - ds += last_level_qualifier + if bool(low_level_qualifier): + ds += low_level_qualifier else: ds += "T" + get_random_q() diff --git a/tests/functional/filters/test_generate_data_set_name.py b/tests/functional/filters/test_generate_data_set_name.py index da523e6f7a..ac12915810 100644 --- a/tests/functional/filters/test_generate_data_set_name.py +++ b/tests/functional/filters/test_generate_data_set_name.py @@ -71,7 +71,7 @@ def test_generate_data_set_name_llq_filter(ansible_zos_module): hosts.all.set_fact(input_string=input_string) jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" - f"last_level_qualifier='{llq}'" + f"low_level_qualifier='{llq}'" f") }}}}" ) results = hosts.all.debug(msg=jinja_expr) @@ -89,7 +89,7 @@ def test_generate_data_set_name_llq_multiple_generations_filter(ansible_zos_modu hosts.all.set_fact(input_string=input_string) jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" - f"last_level_qualifier='{llq}', " + f"low_level_qualifier='{llq}', " f"num_names={num_names}" f") }}}}" ) @@ -110,7 +110,7 @@ def test_generate_data_set_name_mlq_llq_filter(ansible_zos_module): jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" f"middle_level_qualifier='{mlq}', " - f"last_level_qualifier='{llq}') }}}}" + f"low_level_qualifier='{llq}') }}}}" ) results = hosts.all.debug(msg=jinja_expr) @@ -130,7 +130,7 @@ def test_generate_data_set_name_mlq_llq_multiple_generations_filter(ansible_zos_ jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" f"middle_level_qualifier='{mlq}', " - f"last_level_qualifier='{llq}', " + f"low_level_qualifier='{llq}', " f"num_names={num_names}" f") }}}}" ) @@ -195,7 +195,7 @@ def test_generate_data_set_name_mlq_bad_llq(ansible_zos_module): jinja_expr = ( f"{{{{ input_string | generate_data_set_name(" f"middle_level_qualifier='{mlq}', " - f"last_level_qualifier='{llq}') }}}}" + f"low_level_qualifier='{llq}') }}}}" ) results = hosts.all.debug(msg=jinja_expr) From e6b71d29afe6dd267393d373b7dd94d8e8ea0941 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Marcel=20Guti=C3=A9rrez=20Ben=C3=ADtez?= Date: Tue, 11 Nov 2025 15:25:42 -0600 Subject: [PATCH 31/31] Update error message test case --- tests/functional/filters/test_generate_data_set_name.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/filters/test_generate_data_set_name.py b/tests/functional/filters/test_generate_data_set_name.py index ac12915810..c8cba90caf 100644 --- a/tests/functional/filters/test_generate_data_set_name.py +++ b/tests/functional/filters/test_generate_data_set_name.py @@ -219,4 +219,4 @@ def test_generate_data_set_name_filter_bad_hlq(ansible_zos_module): for result in results.contacted.values(): assert result.get('failed') is True - assert result.get('msg') == "Require to be provide a HLQ." \ No newline at end of file + assert result.get('msg') == "A High-Level Qualifier is required." \ No newline at end of file
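As a closing illustration of the filter signature this series settles on, a minimal sketch of calling the filter function directly, outside of Ansible, might look like the following. The sys.path manipulation and running from the collection root are assumptions of this example, as is having ansible-core installed (the plugin imports AnsibleFilterError from ansible.errors); none of that is set up by the patches themselves.

.. code-block:: python

    import re
    import sys

    # Assumption: run from the collection root so the plugin module resolves,
    # with ansible-core installed for the AnsibleFilterError import.
    sys.path.insert(0, "plugins/filter")

    from generate_data_set_name import generate_data_set_name

    # Single name using the renamed low_level_qualifier option.
    name = generate_data_set_name("USER", low_level_qualifier="LLQADM")
    assert name.startswith("USER.") and name.endswith(".LLQADM")

    # Requesting several names returns a list instead of a string.
    names = generate_data_set_name("USER", num_names=3)
    assert len(names) == 3

    # Each qualifier should satisfy the naming rules the plugin enforces.
    for ds_name in names:
        for qualifier in ds_name.split("."):
            assert re.fullmatch(r"[A-Z$#@][A-Z0-9$#@-]{0,7}", qualifier), ds_name

    print(name, names)

This mirrors what the functional tests above exercise through Jinja expressions on a managed node, only without the Ansible plumbing.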