Puppet Plan: peadm::add_replica

Defined in:
plans/add_replica.pp

Summary

Replace a replica host for a Standard or Large architecture. Supported use case: the existing replica is broken and a fresh VM is available to provision the replacement onto; the new replica should have the same certname as the broken one.

Overview

This plan is still in development and currently considered experimental.

Parameters:

  • primary_host (Peadm::SingleTargetSpec)
    • The hostname and certname of the primary Puppet server

  • replica_host (Peadm::SingleTargetSpec)
    • The hostname and certname of the replica VM

  • replica_postgresql_host (Optional[Peadm::SingleTargetSpec]) (defaults to: undef)
    • The hostname and certname of the host with the replica PE-PostgreSQL database.

    Can be a separate host in an XL architecture, or undef in Standard or Large.

  • token_file (Optional[String]) (defaults to: undef)
    • The path to a PE access token file; the plan passes it to the peadm::provision_replica task when provisioning the new replica.
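
As a point of reference, a minimal Bolt invocation might look like the following sketch. The hostnames are placeholders, and the second form shows the optional Extra Large parameter:

  bolt plan run peadm::add_replica \
    primary_host=pe-primary.example.com \
    replica_host=pe-replica.example.com

  # Extra Large only: also supply the replica PE-PostgreSQL host
  bolt plan run peadm::add_replica \
    primary_host=pe-primary.example.com \
    replica_host=pe-replica.example.com \
    replica_postgresql_host=pe-replica-psql.example.com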


# File 'plans/add_replica.pp', line 13

plan peadm::add_replica(
  # Standard or Large
  Peadm::SingleTargetSpec           $primary_host,
  Peadm::SingleTargetSpec           $replica_host,

  # Extra Large
  Optional[Peadm::SingleTargetSpec] $replica_postgresql_host = undef,

  # Common Configuration
  Optional[String] $token_file = undef,
) {

  $primary_target             = peadm::get_targets($primary_host, 1)
  $replica_target             = peadm::get_targets($replica_host, 1)
  $replica_postgresql_target  = peadm::get_targets($replica_postgresql_host, 1)

  run_command('systemctl stop puppet.service', peadm::flatten_compact([
    $primary_target,
    $replica_postgresql_target,
  ]))

  # Get current peadm config to ensure we forget active replicas
  $peadm_config = run_task('peadm::get_peadm_config', $primary_target).first.value

  # Make list of all possible replicas, configured and provided
  $replicas = peadm::flatten_compact([
    $replica_host,
    $peadm_config['params']['replica_host']
  ]).unique

  $certdata = run_task('peadm::cert_data', $primary_target).first.value
  $primary_avail_group_letter = $certdata['extensions'][peadm::oid('peadm_availability_group')]
  $replica_avail_group_letter = $primary_avail_group_letter ? { 'A' => 'B', 'B' => 'A' }

  # replica certname + any non-certname alt-names from the primary. Make sure
  # to handle the case where there are no alt-names in the primary's certdata.
  $dns_alt_names = [$replica_target.peadm::certname()] + (pick($certdata['dns-alt-names'], []) - $certdata['certname'])

  # This has the effect of revoking the node's certificate, if it exists
  $replicas.each |$replica| {
    run_command("/opt/puppetlabs/bin/puppet infrastructure forget ${replica}", $primary_target, _catch_errors => true)
  }

  run_plan('peadm::subplans::component_install', $replica_target,
    primary_host       => $primary_target,
    avail_group_letter => $replica_avail_group_letter,
    role               => 'puppet/server',
    dns_alt_names      => $dns_alt_names
  )

  # On the PE-PostgreSQL server in the <replacement-avail-group-letter> group

  # Stop puppet and add the following two lines to
  # /opt/puppetlabs/server/data/postgresql/11/data/pg_ident.conf
  #  pe-puppetdb-pe-puppetdb-map <replacement-replica-fqdn> pe-puppetdb
  #  pe-puppetdb-pe-puppetdb-migrator-map <replacement-replica-fqdn> pe-puppetdb-migrator
  apply($replica_postgresql_target) {
    file_line { 'pe-puppetdb-pe-puppetdb-map':
      path => '/opt/puppetlabs/server/data/postgresql/11/data/pg_ident.conf',
      line => "pe-puppetdb-pe-puppetdb-map ${replica_target.peadm::certname()} pe-puppetdb",
    }
    file_line { 'pe-puppetdb-pe-puppetdb-migrator-map':
      path => '/opt/puppetlabs/server/data/postgresql/11/data/pg_ident.conf',
      line => "pe-puppetdb-pe-puppetdb-migrator-map ${replica_target.peadm::certname()} pe-puppetdb-migrator",
    }
    file_line { 'pe-puppetdb-pe-puppetdb-read-map':
      path => '/opt/puppetlabs/server/data/postgresql/11/data/pg_ident.conf',
      line => "pe-puppetdb-pe-puppetdb-read-map ${replica_target.peadm::certname()} pe-puppetdb-read",
    }
  }

  run_command('systemctl reload pe-postgresql.service', $replica_postgresql_target)

  run_plan('peadm::util::update_classification', $primary_target,
    server_a_host                    => $replica_avail_group_letter ? { 'A' => $replica_host, default => undef },
    server_b_host                    => $replica_avail_group_letter ? { 'B' => $replica_host, default => undef },
    internal_compiler_a_pool_address => $replica_avail_group_letter ? { 'A' => $replica_host, default => undef },
    internal_compiler_b_pool_address => $replica_avail_group_letter ? { 'B' => $replica_host, default => undef },
    peadm_config                     => $peadm_config
  )

  # Source the global hiera.yaml from Primary and synchronize to new Replica 
  run_plan('peadm::util::sync_global_hiera', $replica_target,
    primary_host => $primary_target
  )

  # Provision the new system as a replica
  run_task('peadm::provision_replica', $primary_target,
    replica    => $replica_target.peadm::certname(),
    token_file => $token_file,

    # Race condition, where the provision command checks PuppetDB status and
    # probably gets "starting", but fails out because that's not "running".
    # Can remove flag when that issue is fixed.
    legacy     => true,
  )

  # start puppet service
  run_command('systemctl start puppet.service', peadm::flatten_compact([
    $primary_target,
    $replica_postgresql_target,
    $replica_target
  ]))

  return("Added replica ${replica_target}")
}
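
Once the plan returns "Added replica ...", a general way to confirm the new replica from the primary (not something the plan does itself) is:

  /opt/puppetlabs/bin/puppet infrastructure status

which reports the health of the replica's services.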