Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/bestpractical/rt.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorsunnavy <sunnavy@bestpractical.com>2022-10-20 18:03:39 +0300
committersunnavy <sunnavy@bestpractical.com>2022-10-28 00:08:55 +0300
commitb440822a33f6ba6448837c55821c3fac3b590227 (patch)
treeacf077e3d210d097a1c093bce376d70bef25997f
parent8d5b9727d98ff45591b54ef450750fb704f7dcdd (diff)
Add --all to serializer to export all data with UIDs and not check dependencies
This is faster than walking through dependencies. The collections are reordered so most dependencies can be serialized first, which reduces the importer work to resolve references.
-rw-r--r--lib/RT/Group.pm17
-rw-r--r--lib/RT/Migrate/Importer.pm2
-rw-r--r--lib/RT/Migrate/Serializer.pm69
-rw-r--r--lib/RT/User.pm19
-rw-r--r--sbin/rt-serializer.in14
5 files changed, 98 insertions, 23 deletions
diff --git a/lib/RT/Group.pm b/lib/RT/Group.pm
index ee84a5fffb..308f05955e 100644
--- a/lib/RT/Group.pm
+++ b/lib/RT/Group.pm
@@ -1737,13 +1737,20 @@ sub PreInflate {
return $duplicated->() if $obj->Id;
}
- my $id = $importer->NextPrincipalId( PrincipalType => 'Group', Disabled => $disabled );
+ # serialized data with --all has principals, in which case we don't need to create a new one.
+ my $principal_obj = $importer->LookupObj( $principal_uid );
+ if ( $principal_obj ) {
+ $data->{id} = $principal_obj->Id;
+ }
+ else {
+ my $id = $importer->NextPrincipalId( PrincipalType => 'Group', Disabled => $disabled );
- # Now we have a principal id, set the id for the group record
- $data->{id} = $id;
+ # Now we have a principal id, set the id for the group record
+ $data->{id} = $id;
+
+ $importer->Resolve( $principal_uid => 'RT::Principal', $id );
+ }
- $importer->Resolve( $principal_uid => 'RT::Principal', $id );
- $data->{id} = $id;
return 1;
}
diff --git a/lib/RT/Migrate/Importer.pm b/lib/RT/Migrate/Importer.pm
index e46c098de9..c486b5014d 100644
--- a/lib/RT/Migrate/Importer.pm
+++ b/lib/RT/Migrate/Importer.pm
@@ -132,6 +132,8 @@ sub LoadMetadata {
$self->{Organization} = $data->{Organization};
$self->{Clone} = $data->{Clone};
$self->{Incremental} = $data->{Incremental};
+ $self->{All} = $data->{All};
+
$self->{Files} = $data->{Files} if $data->{Final};
}
diff --git a/lib/RT/Migrate/Serializer.pm b/lib/RT/Migrate/Serializer.pm
index bce8530f22..253f65a2c6 100644
--- a/lib/RT/Migrate/Serializer.pm
+++ b/lib/RT/Migrate/Serializer.pm
@@ -76,6 +76,7 @@ sub Init {
Clone => 0,
Incremental => 0,
+ All => 0,
Verbose => 1,
@_,
@@ -99,6 +100,7 @@ sub Init {
HyperlinkUnmigrated
Clone
Incremental
+ All
/;
$self->{Clone} = 1 if $self->{Incremental};
@@ -108,7 +110,7 @@ sub Init {
# Keep track of the number of each type of object written out
$self->{ObjectCount} = {};
- if ($self->{Clone}) {
+ if ($self->{Clone} || $self->{All}) {
$self->PushAll;
} else {
$self->PushBasics;
@@ -132,6 +134,7 @@ sub Metadata {
Organization => $RT::Organization,
Clone => $self->{Clone},
Incremental => $self->{Incremental},
+ All => $self->{All},
ObjectCount => { $self->ObjectCount },
@_,
},
@@ -159,13 +162,32 @@ sub PushAll {
# Principals first; while we don't serialize these separately during
# normal dependency walking (we fold them into users and groups),
# having them separate during cloning makes logic simpler.
- $self->PushCollections(qw(Principals));
+ $self->PushCollections(qw(Principals)) if $self->{Clone};
- # Users and groups
- $self->PushCollections(qw(Users Groups GroupMembers));
+ # Users
+ $self->PushCollections(qw(Users));
+
+ # groups
+ if ( $self->{Clone} ) {
+ $self->PushCollections(qw(Groups));
+ }
+ else {
+ my $groups = RT::Groups->new(RT->SystemUser);
+ $groups->FindAllRows if $self->{FollowDisabled};
+ $groups->CleanSlate;
+ $groups->UnLimit;
+ $groups->Limit(
+ FIELD => 'Domain',
+ VALUE => [ 'RT::Queue-Role', 'RT::Ticket-Role', 'RT::Catalog-Role', 'RT::Asset-Role' ],
+ OPERATOR => 'NOT IN',
+ CASESENSITIVE => 0,
+ );
+ $groups->OrderBy( FIELD => 'id' );
+ $self->PushObj($groups);
+ }
# Tickets
- $self->PushCollections(qw(Queues Tickets Transactions Attachments Links));
+ $self->PushCollections(qw(Queues Tickets));
# Articles
$self->PushCollections(qw(Articles), map { ($_, "Object$_") } qw(Classes Topics));
@@ -176,6 +198,23 @@ sub PushAll {
# Assets
$self->PushCollections(qw(Catalogs Assets));
+ if ( !$self->{Clone} ) {
+ my $groups = RT::Groups->new( RT->SystemUser );
+ $groups->FindAllRows if $self->{FollowDisabled};
+ $groups->CleanSlate;
+ $groups->UnLimit;
+ $groups->Limit(
+ FIELD => 'Domain',
+ VALUE => [ 'RT::Queue-Role', 'RT::Ticket-Role', 'RT::Catalog-Role', 'RT::Asset-Role' ],
+ OPERATOR => 'IN',
+ CASESENSITIVE => 0,
+ );
+ $groups->OrderBy( FIELD => 'id' );
+ $self->PushObj($groups);
+ }
+
+ $self->PushCollections(qw(GroupMembers));
+
# Custom Fields
if (RT::ObjectCustomFields->require) {
$self->PushCollections(map { ($_, "Object$_") } qw(CustomFields CustomFieldValues));
@@ -187,10 +226,14 @@ sub PushAll {
$self->PushCollections(qw(ACL));
# Scrips
- $self->PushCollections(qw(Scrips ObjectScrips ScripActions ScripConditions Templates));
+ $self->PushCollections(qw(ScripActions ScripConditions Templates Scrips ObjectScrips));
# Attributes
$self->PushCollections(qw(Attributes));
+
+ $self->PushCollections(qw(Links));
+
+ $self->PushCollections(qw(Transactions Attachments));
}
sub PushCollections {
@@ -366,7 +409,7 @@ sub NextPage {
$last ||= 0;
- if ($self->{Clone}) {
+ if ($self->{Clone} || $self->{All}) {
# Clone provides guaranteed ordering by id and with no other id limits
# worry about trampling
@@ -396,7 +439,7 @@ sub Process {
# Skip all dependency walking if we're cloning; go straight to
# visiting them.
- if ($self->{Clone} and $uid) {
+ if ( ($self->{Clone} || $self->{All}) and $uid) {
return if $obj->isa("RT::System");
$self->{progress}->($obj) if $self->{progress};
return $self->Visit(%args);
@@ -530,11 +573,17 @@ sub Visit {
undef,
\%data,
);
- } elsif ($self->{Clone}) {
+ } elsif ($self->{Clone} || $self->{All}) {
# Short-circuit and get Just The Basics, Sir if we're cloning
my $class = ref($obj);
my $uid = $obj->UID;
- my %data = $obj->RT::Record::Serialize( UIDs => 0 );
+ my %data;
+ if ( $self->{Clone} ) {
+ %data = $obj->RT::Record::Serialize( serializer => $self, UIDs => 0 );
+ }
+ else {
+ %data = $obj->Serialize( serializer => $self, UIDs => 1 );
+ }
# +class is used when seeing a record of one class might insert
# a separate record into the stream
diff --git a/lib/RT/User.pm b/lib/RT/User.pm
index 76458baabc..26a4692738 100644
--- a/lib/RT/User.pm
+++ b/lib/RT/User.pm
@@ -3102,14 +3102,21 @@ sub PreInflate {
return;
}
- # Create a principal first, so we know what ID to use
- my $id = $importer->NextPrincipalId( PrincipalType => 'User', Disabled => $disabled );
+ # serialized data with --all has principals, in which case we don't need to create a new one.
+ my $principal_obj = $importer->LookupObj( $principal_uid );
+ if ( $principal_obj ) {
+ $data->{id} = $principal_obj->Id;
+ }
+ else {
- # Now we have a principal id, set the id for the user record
- $data->{id} = $id;
+ # Create a principal first, so we know what ID to use
+ my $id = $importer->NextPrincipalId( PrincipalType => 'User', Disabled => $disabled );
- $importer->Resolve( $principal_uid => 'RT::Principal', $id );
- $data->{id} = $id;
+ # Now we have a principal id, set the id for the user record
+ $data->{id} = $id;
+
+ $importer->Resolve( $principal_uid => 'RT::Principal', $id );
+ }
return $class->SUPER::PreInflate( $importer, $uid, $data );
}
diff --git a/sbin/rt-serializer.in b/sbin/rt-serializer.in
index c6fe187898..482b2c542d 100644
--- a/sbin/rt-serializer.in
+++ b/sbin/rt-serializer.in
@@ -113,6 +113,7 @@ GetOptions(
"clone",
"incremental",
+ "all",
"gc=i",
"page=i",
@@ -141,6 +142,7 @@ $args{FollowAssets} = $OPT{assets} if defined $OPT{assets};
$args{Clone} = $OPT{clone} if $OPT{clone};
$args{Incremental} = $OPT{incremental} if $OPT{incremental};
+$args{All} = $OPT{all} if $OPT{all};
$args{GC} = defined $OPT{gc} ? $OPT{gc} : 5000;
$args{Page} = defined $OPT{page} ? $OPT{page} : 100;
@@ -184,9 +186,9 @@ if ($OPT{'limit-cfs'}) {
$args{CustomFields} = \@cf_ids;
}
-if (($OPT{clone} or $OPT{incremental})
+if (($OPT{clone} or $OPT{incremental} or $OPT{all})
and grep { /^(users|groups|deleted|disabled|scrips|tickets|transactions|acls|assets)$/ } keys %OPT) {
- die "You cannot specify object types when cloning.\n\nPlease see $0 --help.\n";
+ die "You cannot specify object types when cloning or with --all.\n\nPlease see $0 --help.\n";
}
my $walker;
@@ -417,6 +419,14 @@ C<--clone> with any option that limits object types serialized. No
dependency walking is performed when cloning. C<rt-importer> will detect
that your serialized data set was generated by a clone.
+=item B<--all>
+
+Serializes your entire database, creating clone-like data. Both C<--all>
+and C<--clone> skip dependency checking; the difference is that C<--all>
+generates UIDs, which means the ids in the source instance do not
+necessarily need to match those in the target instance. This makes it
+quite useful for fully merging multiple RT instances.
+
=item B<--incremental>
Will generate an incremental serialized dataset using the data stored in