feat: implement paragraph and italic parsers (#1725)

pull/1727/head
boojack committed 2 years ago via GitHub · parent 8c34be92a6 · commit 42c653e1a4

@@ -18,25 +18,28 @@ func (*BoldParser) Match(tokens []*tokenizer.Token) *BoldParser {
 	}
 	prefixTokens := tokens[:2]
-	if len(prefixTokens) != 2 || prefixTokens[0].Type != prefixTokens[1].Type {
+	if prefixTokens[0].Type != prefixTokens[1].Type {
 		return nil
 	}
 	prefixTokenType := prefixTokens[0].Type
 	if prefixTokenType != tokenizer.Star && prefixTokenType != tokenizer.Underline {
 		return nil
 	}
 	contentTokens := []*tokenizer.Token{}
-	cursor := 2
+	cursor, matched := 2, false
 	for ; cursor < len(tokens)-1; cursor++ {
 		token, nextToken := tokens[cursor], tokens[cursor+1]
 		if token.Type == tokenizer.Newline || nextToken.Type == tokenizer.Newline {
-			break
+			return nil
 		}
 		if token.Type == prefixTokenType && nextToken.Type == prefixTokenType {
+			matched = true
 			break
 		}
 		contentTokens = append(contentTokens, token)
 	}
-	if cursor != len(tokens)-2 {
+	if !matched {
 		return nil
 	}
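
The behavior change is easiest to see with bold text that does not end the line: the old cursor check required the closing pair to be the last two tokens, so input like **Hello** world was rejected, while the matched flag accepts it. A minimal usage sketch, assuming a NewBoldParser constructor and a ContentTokens field parallel to ItalicParser below (neither is shown in this hunk), plus the gomark import paths used by the new files:

package main

import (
	"fmt"

	"github.com/usememos/memos/plugin/gomark/parser"
	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

func main() {
	// "**Hello** world" tokenizes as Star, Star, Text, Star, Star, Space, Text.
	// The old check (cursor != len(tokens)-2) failed here because the closing
	// pair sits at cursor 3, not at the end of the stream.
	tokens := tokenizer.Tokenize("**Hello** world")
	// NewBoldParser and ContentTokens are assumed by analogy with ItalicParser.
	if bold := parser.NewBoldParser().Match(tokens); bold != nil {
		for _, token := range bold.ContentTokens {
			fmt.Print(token.Value) // prints: Hello
		}
	}
}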

@@ -0,0 +1,42 @@
package parser

import "github.com/usememos/memos/plugin/gomark/parser/tokenizer"

type ItalicParser struct {
	ContentTokens []*tokenizer.Token
}

func NewItalicParser() *ItalicParser {
	return &ItalicParser{}
}

// Match returns an ItalicParser wrapping the tokens between a single * or _
// and the next occurrence of the same marker, or nil if the span is
// unterminated, empty, or interrupted by a newline.
func (*ItalicParser) Match(tokens []*tokenizer.Token) *ItalicParser {
	if len(tokens) < 3 {
		return nil
	}

	prefixTokens := tokens[:1]
	if prefixTokens[0].Type != tokenizer.Star && prefixTokens[0].Type != tokenizer.Underline {
		return nil
	}
	prefixTokenType := prefixTokens[0].Type

	contentTokens := []*tokenizer.Token{}
	matched := false
	for _, token := range tokens[1:] {
		if token.Type == tokenizer.Newline {
			return nil
		}
		if token.Type == prefixTokenType {
			matched = true
			break
		}
		contentTokens = append(contentTokens, token)
	}
	if !matched || len(contentTokens) == 0 {
		return nil
	}

	return &ItalicParser{
		ContentTokens: contentTokens,
	}
}
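
A note on how this interacts with BoldParser: a bold marker tokenizes as two consecutive Star tokens, so for input like **Hello** the second Star closes the italic span immediately and the empty-content guard returns nil (the "** Hello * *" case in the test below exercises this). A dispatcher can still try the more specific parser first; a hypothetical sketch, not part of this commit, assuming the same NewBoldParser constructor as above:

package parser

import "github.com/usememos/memos/plugin/gomark/parser/tokenizer"

// matchInline is a hypothetical dispatcher, not part of this commit.
func matchInline(tokens []*tokenizer.Token) any {
	// Trying bold first is a convention rather than a correctness
	// requirement: ItalicParser already rejects a leading ** via its
	// empty-content guard.
	if bold := NewBoldParser().Match(tokens); bold != nil {
		return bold
	}
	if italic := NewItalicParser().Match(tokens); italic != nil {
		return italic
	}
	if paragraph := NewParagraphParser().Match(tokens); paragraph != nil {
		return paragraph
	}
	return nil
}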

@@ -0,0 +1,94 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

func TestItalicParser(t *testing.T) {
	tests := []struct {
		text   string
		italic *ItalicParser
	}{
		{
			text:   "*Hello world!",
			italic: nil,
		},
		{
			text: "*Hello*",
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
				},
			},
		},
		{
			text: "* Hello *",
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
		{
			text:   "** Hello * *",
			italic: nil,
		},
		{
			text: "*1* Hello * *",
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "1",
					},
				},
			},
		},
		{
			text: `* \n * Hello * *`,
			italic: &ItalicParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: `\n`,
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
		{
			text:   "* \n * Hello * *",
			italic: nil,
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		italic := NewItalicParser()
		require.Equal(t, test.italic, italic.Match(tokens))
	}
}

@@ -0,0 +1,30 @@
package parser

import "github.com/usememos/memos/plugin/gomark/parser/tokenizer"

type ParagraphParser struct {
	ContentTokens []*tokenizer.Token
}

func NewParagraphParser() *ParagraphParser {
	return &ParagraphParser{}
}

// Match collects every token up to (but not including) the first newline;
// it returns nil only for an empty line.
func (*ParagraphParser) Match(tokens []*tokenizer.Token) *ParagraphParser {
	contentTokens := []*tokenizer.Token{}
	cursor := 0
	for ; cursor < len(tokens); cursor++ {
		token := tokens[cursor]
		if token.Type == tokenizer.Newline {
			break
		}
		contentTokens = append(contentTokens, token)
	}
	if len(contentTokens) == 0 {
		return nil
	}

	return &ParagraphParser{
		ContentTokens: contentTokens,
	}
}
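
Match here consumes tokens only up to the first Newline and leaves the rest untouched, so a block-level caller has to skip the newline itself before parsing the next line. A hypothetical driver loop, not part of this commit:

package parser

import "github.com/usememos/memos/plugin/gomark/parser/tokenizer"

// parseBlocks is a hypothetical driver, not part of this commit.
func parseBlocks(tokens []*tokenizer.Token) []*ParagraphParser {
	paragraphs := []*ParagraphParser{}
	for len(tokens) > 0 {
		if paragraph := NewParagraphParser().Match(tokens); paragraph != nil {
			paragraphs = append(paragraphs, paragraph)
			// Drop the tokens the paragraph consumed...
			tokens = tokens[len(paragraph.ContentTokens):]
		}
		// ...and the newline that terminated it (or an empty line).
		if len(tokens) > 0 && tokens[0].Type == tokenizer.Newline {
			tokens = tokens[1:]
		}
	}
	return paragraphs
}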

@@ -0,0 +1,85 @@
package parser

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/usememos/memos/plugin/gomark/parser/tokenizer"
)

func TestParagraphParser(t *testing.T) {
	tests := []struct {
		text      string
		paragraph *ParagraphParser
	}{
		{
			text:      "",
			paragraph: nil,
		},
		{
			text: "Hello world!",
			paragraph: &ParagraphParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: "world!",
					},
				},
			},
		},
		{
			// Note the trailing space after "Hello", implied by the
			// expected Space token.
			text: `Hello 
world!`,
			paragraph: &ParagraphParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
		{
			// Note the trailing space after the literal \n.
			text: `Hello \n 
world!`,
			paragraph: &ParagraphParser{
				ContentTokens: []*tokenizer.Token{
					{
						Type:  tokenizer.Text,
						Value: "Hello",
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
					{
						Type:  tokenizer.Text,
						Value: `\n`,
					},
					{
						Type:  tokenizer.Space,
						Value: " ",
					},
				},
			},
		},
	}
	for _, test := range tests {
		tokens := tokenizer.Tokenize(test.text)
		paragraph := NewParagraphParser()
		require.Equal(t, test.paragraph, paragraph.Match(tokens))
	}
}