Copyright (c) 2014, David Kitchen <david@buro9.com> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the organisation (Microcosm) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package bluemonday

import (
	"bytes"
	"io"
	"net/url"
	"regexp"
	"strings"

	"golang.org/x/net/html"
)

var (
	dataAttribute             = regexp.MustCompile("^data-.+")
	dataAttributeXMLPrefix    = regexp.MustCompile("^xml.+")
	dataAttributeInvalidChars = regexp.MustCompile("[A-Z;]+")
)
Sanitize takes a string that contains a HTML fragment or document and applies the given policy whitelist. It returns a HTML string that has been sanitized by the policy or an empty string if an error has occurred (most likely as a consequence of extremely malformed input)
func ( *Policy) ( string) string {
	if strings.TrimSpace() == "" {
		return 
	}

	return .sanitize(strings.NewReader()).String()
}
SanitizeBytes takes a []byte that contains a HTML fragment or document and applies the given policy whitelist. It returns a []byte containing the HTML that has been sanitized by the policy or an empty []byte if an error has occurred (most likely as a consequence of extremely malformed input)
func ( *Policy) ( []byte) []byte {
	if len(bytes.TrimSpace()) == 0 {
		return 
	}

	return .sanitize(bytes.NewReader()).Bytes()
}
SanitizeReader takes an io.Reader that contains a HTML fragment or document and applies the given policy whitelist. It returns a bytes.Buffer containing the HTML that has been sanitized by the policy. Errors during sanitization will merely return an empty result.
func ( *Policy) ( io.Reader) *bytes.Buffer {
	return .sanitize()
}
Performs the actual sanitization process.
func ( *Policy) ( io.Reader) *bytes.Buffer {
It is possible that the developer has created the policy via: p := bluemonday.Policy{} rather than: p := bluemonday.NewPolicy() If this is the case, and if they haven't yet triggered an action that would initiliaze the maps, then we need to do that.
	.init()

	var (
		                     bytes.Buffer
		       bool
		    int64
		           bool
		    []string
		 string
	)

	 := html.NewTokenizer()
	for {
		if .Next() == html.ErrorToken {
			 := .Err()
End of input means end of processing
				return &
			}
Raw tokenizer error
			return &bytes.Buffer{}
		}

		 := .Token()
		switch .Type {
		case html.DoctypeToken:
DocType is not handled as there is no safe parsing mechanism provided by golang.org/x/net/html for the content, and this can be misused to insert HTML tags that are not then sanitized One might wish to recursively sanitize here using the same policy but I will need to do some further testing before considering this.
Comments are ignored by default

		case html.StartTagToken:

			 = .Data

			,  := .elsAndAttrs[.Data]
			if ! {
				if ,  := .setOfElementsToSkipContent[.Data];  {
					 = true
					++
				}
				if .addSpaces {
					.WriteString(" ")
				}
				break
			}

			if len(.Attr) != 0 {
				.Attr = .sanitizeAttrs(.Data, .Attr, )
			}

			if len(.Attr) == 0 {
				if !.allowNoAttrs(.Data) {
					 = true
					 = append(, .Data)
					if .addSpaces {
						.WriteString(" ")
					}
					break
				}
			}

			if ! {
				.WriteString(.String())
			}

		case html.EndTagToken:

			if  == .Data {
				 = ""
			}

			if  && [len()-1] == .Data {
				 = [:len()-1]
				if len() == 0 {
					 = false
				}
				if .addSpaces {
					.WriteString(" ")
				}
				break
			}

			if ,  := .elsAndAttrs[.Data]; ! {
				if ,  := .setOfElementsToSkipContent[.Data];  {
					--
					if  == 0 {
						 = false
					}
				}
				if .addSpaces {
					.WriteString(" ")
				}
				break
			}

			if ! {
				.WriteString(.String())
			}

		case html.SelfClosingTagToken:

			,  := .elsAndAttrs[.Data]
			if ! {
				if .addSpaces {
					.WriteString(" ")
				}
				break
			}

			if len(.Attr) != 0 {
				.Attr = .sanitizeAttrs(.Data, .Attr, )
			}

			if len(.Attr) == 0 && !.allowNoAttrs(.Data) {
				if .addSpaces {
					.WriteString(" ")
				}
				break
			}

			if ! {
				.WriteString(.String())
			}

		case html.TextToken:

			if ! {
				switch  {
not encouraged, but if a policy allows JavaScript we should not HTML escape it as that would break the output
					.WriteString(.Data)
not encouraged, but if a policy allows CSS styles we should not HTML escape it as that would break the output
					.WriteString(.Data)
HTML escape the text
					.WriteString(.String())
				}
			}
A token that didn't exist in the html package when we wrote this
			return &bytes.Buffer{}
		}
	}
}
sanitizeAttrs takes a set of element attribute policies and the global attribute policies and applies them to the []html.Attribute returning a set of html.Attributes that match the policies
func ( *Policy) (
	 string,
	 []html.Attribute,
	 map[string]attrPolicy,
) []html.Attribute {

	if len() == 0 {
		return 
	}
Builds a new attribute slice based on the whether the attribute has been whitelisted explicitly or globally.
	 := []html.Attribute{}
	for ,  := range  {
If we see a data attribute, let it through.
			if isDataAttribute(.Key) {
				 = append(, )
				continue
			}
Is there an element specific attribute policy that applies?
		if ,  := [.Key];  {
			if .regexp != nil {
				if .regexp.MatchString(.Val) {
					 = append(, )
					continue
				}
			} else {
				 = append(, )
				continue
			}
		}
Is there a global attribute policy that applies?
		if ,  := .globalAttrs[.Key];  {

			if .regexp != nil {
				if .regexp.MatchString(.Val) {
					 = append(, )
				}
			} else {
				 = append(, )
			}
		}
	}

If nothing was allowed, let's get out of here
		return 
cleanAttrs now contains the attributes that are permitted

	if linkable() {
Ensure URLs are parseable: - a.href - area.href - link.href - blockquote.cite - q.cite - img.src - script.src
			 := []html.Attribute{}
			for ,  := range  {
				switch  {
				case "a", "area", "link":
					if .Key == "href" {
						if ,  := .validURL(.Val);  {
							.Val = 
							 = append(, )
						}
						break
					}
					 = append(, )
				case "blockquote", "q":
					if .Key == "cite" {
						if ,  := .validURL(.Val);  {
							.Val = 
							 = append(, )
						}
						break
					}
					 = append(, )
				case "img", "script":
					if .Key == "src" {
						if ,  := .validURL(.Val);  {
							.Val = 
							 = append(, )
						}
						break
					}
					 = append(, )
				default:
					 = append(, )
				}
			}
			 = 
		}

		if (.requireNoFollow ||
			.requireNoFollowFullyQualifiedLinks ||
			.addTargetBlankToFullyQualifiedLinks) &&
			len() > 0 {
Add rel="nofollow" if a "href" exists
			switch  {
			case "a", "area", "link":
				var  bool
				var  bool
				for ,  := range  {
					if .Key == "href" {
						 = true

						,  := url.Parse(.Val)
						if  != nil {
							continue
						}
						if .Host != "" {
							 = true
						}

						continue
					}
				}

				if  {
					var (
						    bool
						 bool
					)

					 := (.requireNoFollow ||
						 && .requireNoFollowFullyQualifiedLinks)

					 := ( &&
						.addTargetBlankToFullyQualifiedLinks)

					 := []html.Attribute{}
					for ,  := range  {

						var  bool
						if .Key == "rel" &&  {

							if strings.Contains(.Val, "nofollow") {
								 = true
								 = append(, )
								 = true
							} else {
								.Val += " nofollow"
								 = true
								 = append(, )
								 = true
							}
						}

						if  == "a" && .Key == "target" {
							if .Val == "_blank" {
								 = true
							}
							if  && ! {
								.Val = "_blank"
								 = true
								 = append(, )
								 = true
							}
						}

						if ! {
							 = append(, )
						}
					}
					if  ||  {
						 = 
					}

					if  && ! {
						 := html.Attribute{}
						.Key = "rel"
						.Val = "nofollow"
						 = append(, )
					}

					if  == "a" &&  && ! {
						 := html.Attribute{}
						.Key = "target"
						.Val = "_blank"
						 = true
						 = append(, )
					}

target="_blank" has a security risk that allows the opened window/tab to issue JavaScript calls against window.opener, which in effect allow the destination of the link to control the source: https://dev.to/ben/the-targetblank-vulnerability-by-example To mitigate this risk, we need to add a specific rel attribute if it is not already present. rel="noopener" Unfortunately this is processing the rel twice (we already looked at it earlier ^^) as we cannot be sure of the ordering of the href and rel, and whether we have fully satisfied that we need to do this. This double processing only happens *if* target="_blank" is true.
						var  bool
						 := []html.Attribute{}
						for ,  := range  {
							var  bool
							if .Key == "rel" {
								if strings.Contains(.Val, "noopener") {
									 = true
									 = append(, )
								} else {
									.Val += " noopener"
									 = true
									 = append(, )
								}

								 = true
							}
							if ! {
								 = append(, )
							}
						}
						if  {
							 = 
rel attr was not found, or else noopener would have been added already
							 := html.Attribute{}
							.Key = "rel"
							.Val = "noopener"
							 = append(, )
						}

					}
				}
			default:
			}
		}
	}

	return 
}

func ( *Policy) ( string) bool {
	,  := .setOfElementsAllowedWithoutAttrs[]
	return 
}

func ( *Policy) ( string) (string, bool) {
URLs are valid if when space is trimmed the URL is valid
		 = strings.TrimSpace()
URLs cannot contain whitespace, unless it is a data-uri
		if (strings.Contains(, " ") ||
			strings.Contains(, "\t") ||
			strings.Contains(, "\n")) &&
			!strings.HasPrefix(, `data:`) {
			return "", false
		}
URLs are valid if they parse
		,  := url.Parse()
		if  != nil {
			return "", false
		}

		if .Scheme != "" {

			,  := .allowURLSchemes[.Scheme]
			if ! {
				return "", false

			}

			if  == nil || () == true {
				return .String(), true
			}

			return "", false
		}

		if .allowRelativeURLs {
			if .String() != "" {
				return .String(), true
			}
		}

		return "", false
	}

	return , true
}

func ( string) bool {
	switch  {
	case "a", "area", "blockquote", "img", "link", "script":
		return true
	default:
		return false
	}
}

func ( string) bool {
	if !dataAttribute.MatchString() {
		return false
	}
	 := strings.Split(, "data-")
	if len() == 1 {
		return false
data-xml* is invalid.
no uppercase or semi-colons allowed.
	if dataAttributeInvalidChars.MatchString([1]) {
		return false
	}
	return true