Copyright 2014 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

package profile

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)
Compact performs garbage collection on a profile to remove any unreferenced fields. This is useful to reduce the size of a profile after samples or locations have been removed.
func ( *Profile) () *Profile {
	, _ = Merge([]*Profile{})
	return 
}
Merge merges all the profiles in profs into a single Profile. Returns a new profile independent of the input profiles. The merged profile is compacted to eliminate unused samples, locations, functions and mappings. Profiles must have identical profile sample and period types or the merge will fail. profile.Period of the resulting profile will be the maximum of all profiles, and profile.TimeNanos will be the earliest nonzero one.
func ( []*Profile) (*Profile, error) {
	if len() == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	,  := combineHeaders()
	if  != nil {
		return nil, 
	}

	 := &profileMerger{
		p:         ,
		samples:   make(map[sampleKey]*Sample, len([0].Sample)),
		locations: make(map[locationKey]*Location, len([0].Location)),
		functions: make(map[functionKey]*Function, len([0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len([0].Mapping)),
	}

Clear the profile-specific hash tables
The Mapping list has the property that the first mapping represents the main binary. Take the first Mapping we see, otherwise the operations below will add mappings in an arbitrary order.
			.mapMapping(.Mapping[0])
		}

		for ,  := range .Sample {
			if !isZeroSample() {
				.mapSample()
			}
		}
	}

	for ,  := range .Sample {
If there are any zero samples, re-merge the profile to GC them.
			return ([]*Profile{})
		}
	}

	return , nil
}
Normalize normalizes the source profile by multiplying each value in profile by the ratio of the sum of the base profile's values of that sample type to the sum of the source profile's value of that sample type.
func ( *Profile) ( *Profile) error {

	if  := .compatible();  != nil {
		return 
	}

	 := make([]int64, len(.SampleType))
	for ,  := range .Sample {
		for ,  := range .Value {
			[] += 
		}
	}

	 := make([]int64, len(.SampleType))
	for ,  := range .Sample {
		for ,  := range .Value {
			[] += 
		}
	}

	 := make([]float64, len())
	for  := range  {
		if [] == 0 {
			[] = 0.0
		} else {
			[] = float64([]) / float64([])
		}
	}
	.ScaleN()
	return nil
}

func ( *Sample) bool {
	for ,  := range .Value {
		if  != 0 {
			return false
		}
	}
	return true
}

type profileMerger struct {
	p *Profile
Memoization tables within a profile.
Memoization tables for profile entities.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

// mapInfo records the mapped (merged) Mapping for a source mapping and
// the offset to add to source addresses to translate them into the
// merged mapping's address range.
type mapInfo struct {
	m      *Mapping // mapping in the merged profile
	offset int64    // merged m.Start minus the source mapping's Start
}

func ( *profileMerger) ( *Sample) *Sample {
	 := &Sample{
		Location: make([]*Location, len(.Location)),
		Value:    make([]int64, len(.Value)),
		Label:    make(map[string][]string, len(.Label)),
		NumLabel: make(map[string][]int64, len(.NumLabel)),
		NumUnit:  make(map[string][]string, len(.NumLabel)),
	}
	for ,  := range .Location {
		.Location[] = .mapLocation()
	}
	for ,  := range .Label {
		 := make([]string, len())
		copy(, )
		.Label[] = 
	}
	for ,  := range .NumLabel {
		 := .NumUnit[]
		 := make([]int64, len())
		 := make([]string, len())
		copy(, )
		copy(, )
		.NumLabel[] = 
		.NumUnit[] = 
Check memoization table. Must be done on the remapped location to account for the remapped mapping. Add current values to the existing sample.
	 := .key()
	if ,  := .samples[];  {
		for ,  := range .Value {
			.Value[] += 
		}
		return 
	}
	copy(.Value, .Value)
	.samples[] = 
	.p.Sample = append(.p.Sample, )
	return 
}
key generates sampleKey to be used as a key for maps.
func ( *Sample) () sampleKey {
	 := make([]string, len(.Location))
	for ,  := range .Location {
		[] = strconv.FormatUint(.ID, 16)
	}

	 := make([]string, 0, len(.Label))
	for ,  := range .Label {
		 = append(, fmt.Sprintf("%q%q", , ))
	}
	sort.Strings()

	 := make([]string, 0, len(.NumLabel))
	for ,  := range .NumLabel {
		 = append(, fmt.Sprintf("%q%x%x", , , .NumUnit[]))
	}
	sort.Strings()

	return sampleKey{
		strings.Join(, "|"),
		strings.Join(, ""),
		strings.Join(, ""),
	}
}

// sampleKey identifies a sample for deduplication during merging: the
// hex-encoded location IDs plus the sorted, encoded string and numeric
// labels.
type sampleKey struct {
	locations string
	labels    string
	numlabels string
}

func ( *profileMerger) ( *Location) *Location {
	if  == nil {
		return nil
	}

	if ,  := .locationsByID[.ID];  {
		.locationsByID[.ID] = 
		return 
	}

	 := .mapMapping(.Mapping)
	 := &Location{
		ID:       uint64(len(.p.Location) + 1),
		Mapping:  .m,
		Address:  uint64(int64(.Address) + .offset),
		Line:     make([]Line, len(.Line)),
		IsFolded: .IsFolded,
	}
	for ,  := range .Line {
		.Line[] = .mapLine()
Check memoization table. Must be done on the remapped location to account for the remapped mapping ID.
	 := .key()
	if ,  := .locations[];  {
		.locationsByID[.ID] = 
		return 
	}
	.locationsByID[.ID] = 
	.locations[] = 
	.p.Location = append(.p.Location, )
	return 
}
key generates locationKey to be used as a key for maps.
func ( *Location) () locationKey {
	 := locationKey{
		addr:     .Address,
		isFolded: .IsFolded,
	}
Normalizes address to handle address space randomization.
		.addr -= .Mapping.Start
		.mappingID = .Mapping.ID
	}
	 := make([]string, len(.Line)*2)
	for ,  := range .Line {
		if .Function != nil {
			[*2] = strconv.FormatUint(.Function.ID, 16)
		}
		[*2+1] = strconv.FormatInt(.Line, 16)
	}
	.lines = strings.Join(, "|")
	return 
}

// locationKey identifies a location for deduplication: its address
// (normalized by the mapping start when a mapping exists), the merged
// mapping ID, the encoded line info, and the folded flag.
type locationKey struct {
	addr, mappingID uint64
	lines           string
	isFolded        bool
}

func ( *profileMerger) ( *Mapping) mapInfo {
	if  == nil {
		return mapInfo{}
	}

	if ,  := .mappingsByID[.ID];  {
		return 
	}
Check memoization tables.
	 := .key()
	if ,  := .mappings[];  {
		 := mapInfo{, int64(.Start) - int64(.Start)}
		.mappingsByID[.ID] = 
		return 
	}
	 := &Mapping{
		ID:              uint64(len(.p.Mapping) + 1),
		Start:           .Start,
		Limit:           .Limit,
		Offset:          .Offset,
		File:            .File,
		BuildID:         .BuildID,
		HasFunctions:    .HasFunctions,
		HasFilenames:    .HasFilenames,
		HasLineNumbers:  .HasLineNumbers,
		HasInlineFrames: .HasInlineFrames,
	}
	.p.Mapping = append(.p.Mapping, )
Update memoization tables.
	.mappings[] = 
	 := mapInfo{, 0}
	.mappingsByID[.ID] = 
	return 
}
key generates encoded strings of Mapping to be used as a key for maps.
Normalize addresses to handle address space randomization. Round up to next 4K boundary to avoid minor discrepancies.
	const  = 0x1000

	 := .Limit - .Start
	 =  +  - 1
	 =  - ( % )
	 := mappingKey{
		size:   ,
		offset: .Offset,
	}

	switch {
	case .BuildID != "":
		.buildIDOrFile = .BuildID
	case .File != "":
		.buildIDOrFile = .File
A mapping containing neither build ID nor file name is a fake mapping. A key with empty buildIDOrFile is used for fake mappings so that they are treated as the same mapping during merging.
	}
	return 
}

// mappingKey identifies a mapping for deduplication: its size (rounded
// up to a 4K boundary), file offset, and build ID or file name (empty
// for fake mappings).
type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}

func ( *profileMerger) ( Line) Line {
	 := Line{
		Function: .mapFunction(.Function),
		Line:     .Line,
	}
	return 
}

func ( *profileMerger) ( *Function) *Function {
	if  == nil {
		return nil
	}
	if ,  := .functionsByID[.ID];  {
		return 
	}
	 := .key()
	if ,  := .functions[];  {
		.functionsByID[.ID] = 
		return 
	}
	 := &Function{
		ID:         uint64(len(.p.Function) + 1),
		Name:       .Name,
		SystemName: .SystemName,
		Filename:   .Filename,
		StartLine:  .StartLine,
	}
	.functions[] = 
	.functionsByID[.ID] = 
	.p.Function = append(.p.Function, )
	return 
}
key generates a struct to be used as a key for maps.
combineHeaders checks that all profiles can be merged and returns their combined profile.
func ( []*Profile) (*Profile, error) {
	for ,  := range [1:] {
		if  := [0].compatible();  != nil {
			return nil, 
		}
	}

	var , ,  int64
	var  []string
	 := map[string]bool{}
	var  string
	for ,  := range  {
		if  == 0 || .TimeNanos <  {
			 = .TimeNanos
		}
		 += .DurationNanos
		if  == 0 ||  < .Period {
			 = .Period
		}
		for ,  := range .Comments {
			if  := []; ! {
				 = append(, )
				[] = true
			}
		}
		if  == "" {
			 = .DefaultSampleType
		}
	}

	 := &Profile{
		SampleType: make([]*ValueType, len([0].SampleType)),

		DropFrames: [0].DropFrames,
		KeepFrames: [0].KeepFrames,

		TimeNanos:     ,
		DurationNanos: ,
		PeriodType:    [0].PeriodType,
		Period:        ,

		Comments:          ,
		DefaultSampleType: ,
	}
	copy(.SampleType, [0].SampleType)
	return , nil
}
compatible determines if two profiles can be compared/merged. returns nil if the profiles are compatible; otherwise an error with details on the incompatibility.
func ( *Profile) ( *Profile) error {
	if !equalValueType(.PeriodType, .PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", .PeriodType, .PeriodType)
	}

	if len(.SampleType) != len(.SampleType) {
		return fmt.Errorf("incompatible sample types %v and %v", .SampleType, .SampleType)
	}

	for  := range .SampleType {
		if !equalValueType(.SampleType[], .SampleType[]) {
			return fmt.Errorf("incompatible sample types %v and %v", .SampleType, .SampleType)
		}
	}
	return nil
}
equalValueType returns true if the two value types are semantically equal. It ignores the internal fields used during encode/decode.
func (,  *ValueType) bool {
	return .Type == .Type && .Unit == .Unit