Fix various documentation, user-facing, and source comment typos (#16367)

* Fix various doc, user-facing, and source comment typos

Found via `codespell -q 3 -S ./options/locale,./vendor -L ba,pullrequest,pullrequests,readby`
pull/16374/head
luzpaz 3 years ago committed by GitHub
parent bc6f060b8c
commit e0296b6a6d
86 changed files (changed lines per file in parentheses):

1. cmd/dump.go (2)
2. cmd/hook.go (4)
3. cmd/migrate_storage.go (2)
4. cmd/web_letsencrypt.go (2)
5. custom/conf/app.example.ini (2)
6. docker/root/etc/s6/gitea/setup (2)
7. docker/rootless/usr/local/bin/docker-setup.sh (2)
8. docs/content/doc/advanced/config-cheat-sheet.en-us.md (16)
9. docs/content/doc/advanced/logging-documentation.en-us.md (2)
10. docs/content/doc/features/authentication.en-us.md (4)
11. docs/content/doc/help/faq.en-us.md (2)
12. docs/content/page/index.en-us.md (4)
13. integrations/README.md (2)
14. integrations/release_test.go (2)
15. models/branches.go (2)
16. models/context.go (2)
17. models/error.go (4)
18. models/gpg_key.go (4)
19. models/issue_comment.go (6)
20. models/issue_dependency.go (2)
21. models/issue_label_test.go (2)
22. models/login_source.go (2)
23. models/migrations/migrations.go (2)
24. models/migrations/v111.go (4)
25. models/migrations/v147.go (2)
26. models/oauth2_application.go (6)
27. models/org.go (2)
28. models/repo.go (2)
29. models/repo_list.go (4)
30. models/repo_permission.go (4)
31. models/repo_pushmirror.go (2)
32. models/review.go (4)
33. models/topic.go (2)
34. models/user.go (4)
35. models/user_mail.go (2)
36. modules/context/repo.go (2)
37. modules/git/blame.go (2)
38. modules/git/repo_commit.go (10)
39. modules/git/repo_stats.go (2)
40. modules/git/utils.go (2)
41. modules/indexer/code/elastic_search.go (4)
42. modules/lfs/shared.go (2)
43. modules/log/colors_router.go (6)
44. modules/markup/html.go (2)
45. modules/markup/markdown/ast.go (2)
46. modules/markup/mdstripper/mdstripper_test.go (4)
47. modules/migrations/base/downloader.go (2)
48. modules/migrations/base/label.go (2)
49. modules/migrations/base/uploader.go (2)
50. modules/migrations/github.go (2)
51. modules/migrations/gitlab.go (2)
52. modules/migrations/gogs.go (2)
53. modules/notification/base/null.go (2)
54. modules/references/references_test.go (2)
55. modules/repofiles/action_test.go (2)
56. modules/repofiles/content_test.go (4)
57. modules/repository/adopt.go (4)
58. modules/secret/secret.go (2)
59. modules/setting/indexer.go (2)
60. modules/setting/session.go (2)
61. modules/storage/storage.go (2)
62. modules/task/migrate.go (2)
63. modules/typesniffer/typesniffer.go (2)
64. modules/util/path.go (2)
65. modules/util/shellquote_test.go (4)
66. modules/validation/helpers.go (2)
67. modules/validation/helpers_test.go (6)
68. modules/validation/refname_test.go (2)
69. routers/api/v1/repo/issue_tracked_time.go (2)
70. routers/api/v1/user/app.go (2)
71. routers/web/admin/users.go (4)
72. routers/web/org/setting.go (4)
73. routers/web/repo/http.go (2)
74. routers/web/repo/issue.go (2)
75. routers/web/repo/view.go (2)
76. routers/web/user/auth.go (2)
77. routers/web/web.go (2)
78. services/auth/sspi_windows.go (2)
79. services/issue/assignee.go (2)
80. services/mailer/mailer.go (2)
81. services/wiki/wiki_test.go (2)
82. web_src/fomantic/build/semantic.css (4)
83. web_src/fomantic/build/semantic.js (12)
84. web_src/js/index.js (4)
85. web_src/less/_base.less (4)
86. web_src/less/helpers.less (2)

@@ -280,7 +280,7 @@ func runDump(ctx *cli.Context) error {
 }
 if ctx.IsSet("skip-custom-dir") && ctx.Bool("skip-custom-dir") {
-log.Info("Skiping custom directory")
+log.Info("Skipping custom directory")
 } else {
 customDir, err := os.Stat(setting.CustomPath)
 if err == nil && customDir.IsDir() {

@@ -165,7 +165,7 @@ Gitea or set your environment appropriately.`, "")
 }
 }
-// the environment setted on serv command
+// the environment is set by serv command
 isWiki := os.Getenv(models.EnvRepoIsWiki) == "true"
 username := os.Getenv(models.EnvRepoUsername)
 reponame := os.Getenv(models.EnvRepoName)
@@ -320,7 +320,7 @@ Gitea or set your environment appropriately.`, "")
 }
 }
-// the environment setted on serv command
+// the environment is set by serv command
 repoUser := os.Getenv(models.EnvRepoUsername)
 isWiki := os.Getenv(models.EnvRepoIsWiki) == "true"
 repoName := os.Getenv(models.EnvRepoName)

@@ -184,7 +184,7 @@ func runMigrateStorage(ctx *cli.Context) error {
 return fmt.Errorf("Unsupported storage: %s", ctx.String("type"))
 }
-log.Warn("All files have been copied to the new placement but old files are still on the orignial placement.")
+log.Warn("All files have been copied to the new placement but old files are still on the original placement.")
 return nil
 }

@@ -19,7 +19,7 @@ import (
 func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler) error {
 // If HTTP Challenge enabled, needs to be serving on port 80. For TLSALPN needs 443.
-// Due to docker port mapping this can't be checked programatically
+// Due to docker port mapping this can't be checked programmatically
 // TODO: these are placeholders until we add options for each in settings with appropriate warning
 enableHTTPChallenge := true
 enableTLSALPNChallenge := true

@@ -659,7 +659,7 @@ PATH =
 ;; Public is for users visible for everyone
 ;DEFAULT_USER_VISIBILITY = public
 ;;
-;; Set whitch visibibilty modes a user can have
+;; Set which visibility modes a user can have
 ;ALLOWED_USER_VISIBILITY_MODES = public,limited,private
 ;;
 ;; Either "public", "limited" or "private", default is "public"

@@ -23,7 +23,7 @@ if [ ! -f ${GITEA_CUSTOM}/conf/app.ini ]; then
 INSTALL_LOCK=true
 fi
-# Substitude the environment variables in the template
+# Substitute the environment variables in the template
 APP_NAME=${APP_NAME:-"Gitea: Git with a cup of tea"} \
 RUN_MODE=${RUN_MODE:-"prod"} \
 DOMAIN=${DOMAIN:-"localhost"} \

@@ -25,7 +25,7 @@ if [ ! -f ${GITEA_APP_INI} ]; then
 INSTALL_LOCK=true
 fi
-# Substitude the environment variables in the template
+# Substitute the environment variables in the template
 APP_NAME=${APP_NAME:-"Gitea: Git with a cup of tea"} \
 RUN_MODE=${RUN_MODE:-"prod"} \
 RUN_USER=${USER:-"git"} \

@@ -128,8 +128,8 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
 - Options other than `never` and `always` can be combined as a comma separated list.
 - `DEFAULT_TRUST_MODEL`: **collaborator**: \[collaborator, committer, collaboratorcommitter\]: The default trust model used for verifying commits.
 - `collaborator`: Trust signatures signed by keys of collaborators.
-- `committer`: Trust signatures that match committers (This matches GitHub and will force Gitea signed commits to have Gitea as the commmitter).
-- `collaboratorcommitter`: Trust signatures signed by keys of collaborators which match the commiter.
+- `committer`: Trust signatures that match committers (This matches GitHub and will force Gitea signed commits to have Gitea as the committer).
+- `collaboratorcommitter`: Trust signatures signed by keys of collaborators which match the committer.
 - `WIKI`: **never**: \[never, pubkey, twofa, always, parentsigned\]: Sign commits to wiki.
 - `CRUD_ACTIONS`: **pubkey, twofa, parentsigned**: \[never, pubkey, twofa, parentsigned, always\]: Sign CRUD actions.
 - Options as above, with the addition of:
@@ -345,9 +345,9 @@ The following configuration set `Content-Type: application/vnd.android.package-a
 - `PATH`: **data/gitea.db**: For SQLite3 only, the database file path.
 - `LOG_SQL`: **true**: Log the executed SQL.
 - `DB_RETRIES`: **10**: How many ORM init / DB connect attempts allowed.
-- `DB_RETRY_BACKOFF`: **3s**: time.Duration to wait before trying another ORM init / DB connect attempt, if failure occured.
+- `DB_RETRY_BACKOFF`: **3s**: time.Duration to wait before trying another ORM init / DB connect attempt, if failure occurred.
 - `MAX_OPEN_CONNS` **0**: Database maximum open connections - default is 0, meaning there is no limit.
-- `MAX_IDLE_CONNS` **2**: Max idle database connections on connnection pool, default is 2 - this will be capped to `MAX_OPEN_CONNS`.
+- `MAX_IDLE_CONNS` **2**: Max idle database connections on connection pool, default is 2 - this will be capped to `MAX_OPEN_CONNS`.
 - `CONN_MAX_LIFETIME` **0 or 3s**: Sets the maximum amount of time a DB connection may be reused - default is 0, meaning there is no limit (except on MySQL where it is 3s - see #6804 & #7071).
 Please see #8540 & #8273 for further discussion of the appropriate values for `MAX_OPEN_CONNS`, `MAX_IDLE_CONNS` & `CONN_MAX_LIFETIME` and their
@@ -385,7 +385,7 @@ relation to port exhaustion.
 - `LENGTH`: **20**: Maximal queue size before channel queues block
 - `BATCH_LENGTH`: **20**: Batch data before passing to the handler
 - `CONN_STR`: **redis://127.0.0.1:6379/0**: Connection string for the redis queue type. Options can be set using query params. Similarly LevelDB options can also be set using: **leveldb://relative/path?option=value** or **leveldb:///absolute/path?option=value**, and will override `DATADIR`
-- `QUEUE_NAME`: **_queue**: The suffix for default redis and disk queue name. Individual queues will default to **`name`**`QUEUE_NAME` but can be overriden in the specific `queue.name` section.
+- `QUEUE_NAME`: **_queue**: The suffix for default redis and disk queue name. Individual queues will default to **`name`**`QUEUE_NAME` but can be overridden in the specific `queue.name` section.
 - `SET_NAME`: **_unique**: The suffix that will be added to the default redis and disk queue `set` name for unique queues. Individual queues will default to
 **`name`**`QUEUE_NAME`_`SET_NAME`_ but can be overridden in the specific `queue.name` section.
 - `WRAP_IF_NECESSARY`: **true**: Will wrap queues with a timeoutable queue if the selected queue is not ready to be created - (Only relevant for the level queue.)
@@ -516,7 +516,7 @@ relation to port exhaustion.
 - `AUTO_WATCH_NEW_REPOS`: **true**: Enable this to let all organisation users watch new repos when they are created
 - `AUTO_WATCH_ON_CHANGES`: **false**: Enable this to make users watch a repository after their first commit to it
 - `DEFAULT_USER_VISIBILITY`: **public**: Set default visibility mode for users, either "public", "limited" or "private".
-- `ALLOWED_USER_VISIBILITY_MODES`: **public,limited,private**: Set whitch visibibilty modes a user can have
+- `ALLOWED_USER_VISIBILITY_MODES`: **public,limited,private**: Set which visibility modes a user can have
 - `DEFAULT_ORG_VISIBILITY`: **public**: Set default visibility mode for organisations, either "public", "limited" or "private".
 - `DEFAULT_ORG_MEMBER_VISIBLE`: **false** True will make the membership of the users visible when added to the organisation.
 - `ALLOW_ONLY_INTERNAL_REGISTRATION`: **false** Set to true to force registration only via gitea.
@@ -895,7 +895,7 @@ IS_INPUT_FILE = false
 - ENABLED: **false** Enable markup support; set to **true** to enable this renderer.
 - NEED\_POSTPROCESS: **true** set to **true** to replace links / sha1 and etc.
 - FILE\_EXTENSIONS: **\<empty\>** List of file extensions that should be rendered by an external
-command. Multiple extentions needs a comma as splitter.
+command. Multiple extensions needs a comma as splitter.
 - RENDER\_COMMAND: External command to render all matching extensions.
 - IS\_INPUT\_FILE: **false** Input is not a standard input but a file param followed `RENDER_COMMAND`.
@@ -927,7 +927,7 @@ If the rule is defined above the renderer ini section or the name does not match
 ## Time (`time`)
-- `FORMAT`: Time format to diplay on UI. i.e. RFC1123 or 2006-01-02 15:04:05
+- `FORMAT`: Time format to display on UI. i.e. RFC1123 or 2006-01-02 15:04:05
 - `DEFAULT_UI_LOCATION`: Default location of time on the UI, so that we can display correct user's time on UI. i.e. Shanghai/Asia
 ## Task (`task`)

@@ -282,7 +282,7 @@ ROUTER = console
 COLORIZE = false ; this can be true if you can strip out the ansi coloring
 ```
-Sometimes it will be helpful get some specific `TRACE` level logging retricted
+Sometimes it will be helpful get some specific `TRACE` level logging restricted
 to messages that match a specific `EXPRESSION`. Adjusting the `MODE` in the
 `[log]` section to `MODE = console,traceconsole` to add a new logger output
 `traceconsole` and then adding its corresponding section would be helpful:

@@ -259,7 +259,7 @@ Before activating SSPI single sign-on authentication (SSO) you have to prepare y
 - Create a service principal name for the host where `gitea.exe` is running with class `HTTP`:
-- Start `Command Prompt` or `PowerShell` as a priviledged domain user (eg. Domain Administrator)
+- Start `Command Prompt` or `PowerShell` as a privileged domain user (eg. Domain Administrator)
 - Run the command below, replacing `host.domain.local` with the fully qualified domain name (FQDN) of the server where the web application will be running, and `domain\user` with the name of the account created in the previous step:
 ```sh
@@ -283,7 +283,7 @@ Before activating SSPI single sign-on authentication (SSO) you have to prepare y
 - Click the `Sign In` button on the dashboard and choose SSPI to be automatically logged in with the same user that is currently logged on to the computer
 - If it does not work, make sure that:
-- You are not running the web browser on the same server where gitea is running. You should be running the web browser on a domain joined computer (client) that is different from the server. If both the client and server are runnning on the same computer NTLM will be prefered over Kerberos.
+- You are not running the web browser on the same server where gitea is running. You should be running the web browser on a domain joined computer (client) that is different from the server. If both the client and server are running on the same computer NTLM will be preferred over Kerberos.
 - There is only one `HTTP/...` SPN for the host
 - The SPN contains only the hostname, without the port
 - You have added the URL of the web app to the `Local intranet zone`

@@ -142,7 +142,7 @@ The current way to achieve this is to create/modify a user with a max repo creat
 Restricted users are limited to a subset of the content based on their organization/team memberships and collaborations, ignoring the public flag on organizations/repos etc.\_\_
-Example use case: A company runs a Gitea instance that requires login. Most repos are public (accessible/browseable by all co-workers).
+Example use case: A company runs a Gitea instance that requires login. Most repos are public (accessible/browsable by all co-workers).
 At some point, a customer or third party needs access to a specific repo and only that repo. Making such a customer account restricted and granting any needed access using team membership(s) and/or collaboration(s) is a simple way to achieve that without the need to make everything private.

@@ -117,7 +117,7 @@ Windows, on architectures like amd64, i386, ARM, PowerPC, and others.
 - Configuration viewer
 - Everything in config file
 - System notices
-- When somthing unexpected happens
+- When something unexpected happens
 - Monitoring
 - Current processes
 - Cron jobs
@@ -155,7 +155,7 @@ Windows, on architectures like amd64, i386, ARM, PowerPC, and others.
 - Libravatar
 - Custom
 - Password
-- Mutiple email addresses
+- Multiple email addresses
 - SSH Keys
 - Connected applications
 - Two factor authentication

@@ -28,7 +28,7 @@ make test-sqlite
 Setup a mysql database inside docker
 ```
 docker run -e "MYSQL_DATABASE=test" -e "MYSQL_ALLOW_EMPTY_PASSWORD=yes" -p 3306:3306 --rm --name mysql mysql:latest #(just ctrl-c to stop db and clean the container)
-docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" --rm --name elasticsearch elasticsearch:7.6.0 #(in a secound terminal, just ctrl-c to stop db and clean the container)
+docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" --rm --name elasticsearch elasticsearch:7.6.0 #(in a second terminal, just ctrl-c to stop db and clean the container)
 ```
 Start tests based on the database container
 ```

@@ -116,7 +116,7 @@ func TestCreateReleasePaging(t *testing.T) {
 setting.API.DefaultPagingNum = 10
 session := loginUser(t, "user2")
-// Create enaugh releases to have paging
+// Create enough releases to have paging
 for i := 0; i < 12; i++ {
 version := fmt.Sprintf("v0.0.%d", i)
 createNewRelease(t, session, "/user2/repo1", version, version, false, false)

@@ -219,7 +219,7 @@ func (protectBranch *ProtectedBranch) GetProtectedFilePatterns() []glob.Glob {
 expr = strings.TrimSpace(expr)
 if expr != "" {
 if g, err := glob.Compile(expr, '.', '/'); err != nil {
-log.Info("Invalid glob expresion '%s' (skipped): %v", expr, err)
+log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
 } else {
 extarr = append(extarr, g)
 }
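For context, a minimal standalone sketch of how the `glob.Compile(expr, '.', '/')` calls touched in this commit behave; the pattern and paths below are hypothetical examples, not taken from the patch:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// Compile a pattern with '.' and '/' as separators, mirroring the call
	// in GetProtectedFilePatterns above. Invalid expressions are logged and
	// skipped there rather than aborting the whole list.
	g, err := glob.Compile("docs/**", '.', '/')
	if err != nil {
		fmt.Printf("Invalid glob expression %q (skipped): %v\n", "docs/**", err)
		return
	}

	fmt.Println(g.Match("docs/content/doc/help/faq.en-us.md")) // true
	fmt.Println(g.Match("cmd/dump.go"))                        // false
}
```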

@@ -42,7 +42,7 @@ func WithContext(f func(ctx DBContext) error) error {
 return f(DBContext{x})
 }
-// WithTx represents executing database operations on a trasaction
+// WithTx represents executing database operations on a transaction
 func WithTx(f func(ctx DBContext) error) error {
 sess := x.NewSession()
 if err := sess.Begin(); err != nil {

@@ -1114,7 +1114,7 @@ func IsErrUserDoesNotHaveAccessToRepo(err error) bool {
 }
 func (err ErrUserDoesNotHaveAccessToRepo) Error() string {
-return fmt.Sprintf("user doesn't have acces to repo [user_id: %d, repo_name: %s]", err.UserID, err.RepoName)
+return fmt.Sprintf("user doesn't have access to repo [user_id: %d, repo_name: %s]", err.UserID, err.RepoName)
 }
 // ErrWontSign explains the first reason why a commit would not be signed
@@ -1289,7 +1289,7 @@ func IsErrSHAOrCommitIDNotProvided(err error) bool {
 }
 func (err ErrSHAOrCommitIDNotProvided) Error() string {
-return "a SHA or commmit ID must be proved when updating a file"
+return "a SHA or commit ID must be proved when updating a file"
 }
 // __ __ ___. .__ __

@@ -622,7 +622,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
 var err error
 // Find Committer account
 committer, err = GetUserByEmail(c.Committer.Email) // This finds the user by primary email or activated email so commit will not be valid if email is not
-if err != nil { // Skipping not user for commiter
+if err != nil { // Skipping not user for committer
 committer = &User{
 Name: c.Committer.Name,
 Email: c.Committer.Email,
@@ -698,7 +698,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
 }
 for _, k := range keys {
-// Pre-check (& optimization) that emails attached to key can be attached to the commiter email and can validate
+// Pre-check (& optimization) that emails attached to key can be attached to the committer email and can validate
 canValidate := false
 email := ""
 for _, e := range k.Emails {

@@ -184,7 +184,7 @@ type Comment struct {
 RefRepoID int64 `xorm:"index"` // Repo where the referencing
 RefIssueID int64 `xorm:"index"`
 RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's)
-RefAction references.XRefAction `xorm:"SMALLINT"` // What hapens if RefIssueID resolves
+RefAction references.XRefAction `xorm:"SMALLINT"` // What happens if RefIssueID resolves
 RefIsPull bool
 RefRepo *Repository `xorm:"-"`
@@ -1228,7 +1228,7 @@ func UpdateCommentsMigrationsByType(tp structs.GitServiceType, originalAuthorID
 return err
 }
-// CreatePushPullComment create push code to pull base commend
+// CreatePushPullComment create push code to pull base comment
 func CreatePushPullComment(pusher *User, pr *PullRequest, oldCommitID, newCommitID string) (comment *Comment, err error) {
 if pr.HasMerged || oldCommitID == "" || newCommitID == "" {
 return nil, nil
@@ -1262,7 +1262,7 @@ func CreatePushPullComment(pusher *User, pr *PullRequest, oldCommitID, newCommit
 return
 }
-// getCommitsFromRepo get commit IDs from repo in betwern oldCommitID and newCommitID
+// getCommitsFromRepo get commit IDs from repo in between oldCommitID and newCommitID
 // isForcePush will be true if oldCommit isn't on the branch
 // Commit on baseBranch will skip
 func getCommitIDsFromRepo(repo *Repository, oldCommitID, newCommitID, baseBranch string) (commitIDs []string, isForcePush bool, err error) {

@@ -128,7 +128,7 @@ func issueNoDependenciesLeft(e Engine, issue *Issue) (bool, error) {
 return !exists, err
 }
-// IsDependenciesEnabled returns if dependecies are enabled and returns the default setting if not set.
+// IsDependenciesEnabled returns if dependencies are enabled and returns the default setting if not set.
 func (repo *Repository) IsDependenciesEnabled() bool {
 return repo.isDependenciesEnabled(x)
 }

@@ -135,7 +135,7 @@ func TestGetLabelsByRepoID(t *testing.T) {
 testSuccess(1, "default", []int64{1, 2})
 }
-// Org vrsions
+// Org versions
 func TestGetLabelInOrgByName(t *testing.T) {
 assert.NoError(t, PrepareTestDatabase())

@@ -343,7 +343,7 @@ func CreateLoginSource(source *LoginSource) error {
 } else if has {
 return ErrLoginSourceAlreadyExist{source.Name}
 }
-// Synchronization is only aviable with LDAP for now
+// Synchronization is only available with LDAP for now
 if !source.IsLDAP() {
 source.IsSyncEnabled = false
 }

@@ -187,7 +187,7 @@ var migrations = []Migration{
 // v122 -> v123
 NewMigration("Add Require Signed Commits to ProtectedBranch", addRequireSignedCommits),
 // v123 -> v124
-NewMigration("Add original informations for reactions", addReactionOriginals),
+NewMigration("Add original information for reactions", addReactionOriginals),
 // v124 -> v125
 NewMigration("Add columns to user and repository", addUserRepoMissingColumns),
 // v125 -> v126

@@ -141,8 +141,8 @@ func addBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
 return perm, err
 }
-// Prevent strangers from checking out public repo of private orginization
-// Allow user if they are collaborator of a repo within a private orginization but not a member of the orginization itself
+// Prevent strangers from checking out public repo of private organization
+// Allow user if they are collaborator of a repo within a private organization but not a member of the organization itself
 hasOrgVisible := true
 // Not SignedUser
 if user == nil {

@@ -75,7 +75,7 @@ func createReviewsForCodeComments(x *xorm.Engine) error {
 RefRepoID int64 `xorm:"index"` // Repo where the referencing
 RefIssueID int64 `xorm:"index"`
 RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's)
-RefAction int `xorm:"SMALLINT"` // What hapens if RefIssueID resolves
+RefAction int `xorm:"SMALLINT"` // What happens if RefIssueID resolves
 RefIsPull bool
 }

@@ -210,7 +210,7 @@ func UpdateOAuth2Application(opts UpdateOAuth2ApplicationOptions) (*OAuth2Applic
 return nil, err
 }
 if app.UID != opts.UserID {
-return nil, fmt.Errorf("UID missmatch")
+return nil, fmt.Errorf("UID mismatch")
 }
 app.Name = opts.Name
@@ -376,7 +376,7 @@ func getOAuth2AuthorizationByCode(e Engine, code string) (auth *OAuth2Authorizat
 //////////////////////////////////////////////////////
-// OAuth2Grant represents the permission of an user for a specifc application to access resources
+// OAuth2Grant represents the permission of an user for a specific application to access resources
 type OAuth2Grant struct {
 ID int64 `xorm:"pk autoincr"`
 UserID int64 `xorm:"INDEX unique(user_application)"`
@@ -537,7 +537,7 @@ type OAuth2Token struct {
 jwt.StandardClaims
 }
-// ParseOAuth2Token parses a singed jwt string
+// ParseOAuth2Token parses a signed jwt string
 func ParseOAuth2Token(jwtToken string) (*OAuth2Token, error) {
 parsedToken, err := jwt.ParseWithClaims(jwtToken, &OAuth2Token{}, func(token *jwt.Token) (interface{}, error) {
 if token.Method == nil || token.Method.Alg() != oauth2.DefaultSigningKey.SigningMethod().Alg() {

@@ -79,7 +79,7 @@ func (org *User) GetMembers() (err error) {
 return
 }
-// FindOrgMembersOpts represensts find org members condtions
+// FindOrgMembersOpts represensts find org members conditions
 type FindOrgMembersOpts struct {
 ListOptions
 OrgID int64

@@ -1616,7 +1616,7 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
 sess.Close()
 // We should always delete the files after the database transaction succeed. If
-// we delete the file but the database rollback, the repository will be borken.
+// we delete the file but the database rollback, the repository will be broken.
 // Remove issue attachment files.
 for i := range attachmentPaths {

@@ -148,11 +148,11 @@ type SearchRepoOptions struct {
 AllLimited bool // Include also all public repositories of limited organisations
 // None -> include public and private
 // True -> include just private
-// False -> incude just public
+// False -> include just public
 IsPrivate util.OptionalBool
 // None -> include collaborative AND non-collaborative
 // True -> include just collaborative
-// False -> incude just non-collaborative
+// False -> include just non-collaborative
 Collaborate util.OptionalBool
 // None -> include forks AND non-forks
 // True -> include just forks

@@ -176,7 +176,7 @@ func getUserRepoPermission(e Engine, repo *Repository, user *User) (perm Permiss
 return
 }
-// Prevent strangers from checking out public repo of private orginization/users
+// Prevent strangers from checking out public repo of private organization/users
 // Allow user if they are collaborator of a repo within a private user or a private organization but not a member of the organization itself
 if !hasOrgOrUserVisible(e, repo.Owner, user) && !isCollaborator {
 perm.AccessMode = AccessModeNone
@@ -351,7 +351,7 @@ func hasAccessUnit(e Engine, user *User, repo *Repository, unitType UnitType, te
 return testMode <= mode, err
 }
-// HasAccessUnit returns ture if user has testMode to the unit of the repository
+// HasAccessUnit returns true if user has testMode to the unit of the repository
 func HasAccessUnit(user *User, repo *Repository, unitType UnitType, testMode AccessMode) (bool, error) {
 return hasAccessUnit(x, user, repo, unitType, testMode)
 }

@@ -91,7 +91,7 @@ func GetPushMirrorByID(ID int64) (*PushMirror, error) {
 return m, nil
 }
-// GetPushMirrorsByRepoID returns push-mirror informations of a repository.
+// GetPushMirrorsByRepoID returns push-mirror information of a repository.
 func GetPushMirrorsByRepoID(repoID int64) ([]*PushMirror, error) {
 mirrors := make([]*PushMirror, 0, 10)
 return mirrors, x.Where("repo_id=?", repoID).Find(&mirrors)

@@ -466,7 +466,7 @@ func GetReviewersByIssueID(issueID int64) ([]*Review, error) {
 return nil, err
 }
-// Get latest review of each reviwer, sorted in order they were made
+// Get latest review of each reviewer, sorted in order they were made
 if err := sess.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id = 0 AND type in (?, ?, ?) AND dismissed = ? AND original_author_id = 0 GROUP BY issue_id, reviewer_id) ORDER BY review.updated_unix ASC",
 issueID, ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest, false).
 Find(&reviews); err != nil {
@@ -491,7 +491,7 @@ func GetReviewersByIssueID(issueID int64) ([]*Review, error) {
 func GetReviewersFromOriginalAuthorsByIssueID(issueID int64) ([]*Review, error) {
 reviews := make([]*Review, 0, 10)
-// Get latest review of each reviwer, sorted in order they were made
+// Get latest review of each reviewer, sorted in order they were made
 if err := x.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id = 0 AND type in (?, ?, ?) AND original_author_id <> 0 GROUP BY issue_id, original_author_id) ORDER BY review.updated_unix ASC",
 issueID, ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest).
 Find(&reviews); err != nil {

@@ -195,7 +195,7 @@ func FindTopics(opts *FindTopicOptions) (topics []*Topic, err error) {
 return topics, sess.Desc("topic.repo_count").Find(&topics)
 }
-// GetRepoTopicByName retrives topic from name for a repo if it exist
+// GetRepoTopicByName retrieves topic from name for a repo if it exist
 func GetRepoTopicByName(repoID int64, topicName string) (*Topic, error) {
 return getRepoTopicByName(x, repoID, topicName)
 }

@@ -1631,7 +1631,7 @@ func (opts *SearchUserOptions) toConds() builder.Cond {
 // If Admin - they see all users!
 if !opts.Actor.IsAdmin {
-// Force visiblity for privacy
+// Force visibility for privacy
 var accessCond builder.Cond
 if !opts.Actor.IsRestricted {
 accessCond = builder.Or(
@@ -1647,7 +1647,7 @@ func (opts *SearchUserOptions) toConds() builder.Cond {
 }
 } else {
-// Force visiblity for privacy
+// Force visibility for privacy
 // Not logged in - only public users
 cond = cond.And(builder.In("visibility", structs.VisibleTypePublic))
 }

@@ -269,7 +269,7 @@ func MakeEmailPrimary(email *EmailAddress) error {
 return err
 }
-// 3. update new primay email
+// 3. update new primary email
 email.IsPrimary = true
 if _, err = sess.ID(email.ID).Cols("is_primary").Update(email); err != nil {
 return err

@@ -38,7 +38,7 @@ var IssueTemplateDirCandidates = []string{
 ".gitlab/issue_template",
 }
-// PullRequest contains informations to make a pull request
+// PullRequest contains information to make a pull request
 type PullRequest struct {
 BaseRepo *models.Repository
 Allowed bool

@@ -34,7 +34,7 @@ type BlameReader struct {
 var shaLineRegex = regexp.MustCompile("^([a-z0-9]{40})")
-// NextPart returns next part of blame (sequencial code lines with the same commit)
+// NextPart returns next part of blame (sequential code lines with the same commit)
 func (r *BlameReader) NextPart() (*BlamePart, error) {
 var blamePart *BlamePart

@@ -110,7 +110,7 @@ func (repo *Repository) searchCommits(id SHA1, opts SearchCommitsOptions) (*list
 }
 }
-// add commiters if present in search query
+// add committers if present in search query
 if len(opts.Committers) > 0 {
 for _, v := range opts.Committers {
 args = append(args, "--committer="+v)
@@ -150,7 +150,7 @@ func (repo *Repository) searchCommits(id SHA1, opts SearchCommitsOptions) (*list
 stdout = append(stdout, '\n')
 }
-// if there are any keywords (ie not commiter:, author:, time:)
+// if there are any keywords (ie not committer:, author:, time:)
 // then let's iterate over them
 if len(opts.Keywords) > 0 {
 for _, v := range opts.Keywords {
@@ -195,12 +195,12 @@ func (repo *Repository) FileChangedBetweenCommits(filename, id1, id2 string) (bo
 return len(strings.TrimSpace(string(stdout))) > 0, nil
 }
-// FileCommitsCount return the number of files at a revison
+// FileCommitsCount return the number of files at a revision
 func (repo *Repository) FileCommitsCount(revision, file string) (int64, error) {
 return CommitsCountFiles(repo.Path, []string{revision}, []string{file})
 }
-// CommitsByFileAndRange return the commits according revison file and the page
+// CommitsByFileAndRange return the commits according revision file and the page
 func (repo *Repository) CommitsByFileAndRange(revision, file string, page int) (*list.List, error) {
 skip := (page - 1) * setting.Git.CommitsRangeSize
@@ -240,7 +240,7 @@ func (repo *Repository) CommitsByFileAndRange(revision, file string, page int) (
 return repo.parsePrettyFormatLogToList(stdout)
 }
-// CommitsByFileAndRangeNoFollow return the commits according revison file and the page
+// CommitsByFileAndRangeNoFollow return the commits according revision file and the page
 func (repo *Repository) CommitsByFileAndRangeNoFollow(revision, file string, page int) (*list.List, error) {
 stdout, err := NewCommand("log", revision, "--skip="+strconv.Itoa((page-1)*50),
 "--max-count="+strconv.Itoa(setting.Git.CommitsRangeSize), prettyLogFormat, "--", file).RunInDirBytes(repo.Path)

@@ -33,7 +33,7 @@ type CodeActivityAuthor struct {
 Commits int64
 }
-// GetCodeActivityStats returns code statistics for acitivity page
+// GetCodeActivityStats returns code statistics for activity page
 func (repo *Repository) GetCodeActivityStats(fromTime time.Time, branch string) (*CodeActivityStats, error) {
 stats := &CodeActivityStats{}

@@ -13,7 +13,7 @@ import (
 "sync"
 )
-// ObjectCache provides thread-safe cache opeations.
+// ObjectCache provides thread-safe cache operations.
 type ObjectCache struct {
 lock sync.RWMutex
 cache map[string]interface{}

@@ -284,7 +284,7 @@ func (b *ElasticSearchIndexer) Delete(repoID int64) error {
 }
 // indexPos find words positions for start and the following end on content. It will
-// return the beginning position of the frist start and the ending position of the
+// return the beginning position of the first start and the ending position of the
 // first end following the start string.
 // If not found any of the positions, it will return -1, -1.
 func indexPos(content, start, end string) (int, int) {
@@ -309,7 +309,7 @@ func convertResult(searchResult *elastic.SearchResult, kw string, pageSize int)
 c, ok := hit.Highlight["content"]
 if ok && len(c) > 0 {
 // FIXME: Since the highlighting content will include <em> and </em> for the keywords,
-// now we should find the poisitions. But how to avoid html content which contains the
+// now we should find the positions. But how to avoid html content which contains the
 // <em> and </em> tags? If elastic search has handled that?
 startIndex, endIndex = indexPos(c[0], "<em>", "</em>")
 if startIndex == -1 {

@@ -49,7 +49,7 @@ type ObjectResponse struct {
 Error *ObjectError `json:"error,omitempty"`
 }
-// Link provides a structure with informations about how to access a object.
+// Link provides a structure with information about how to access a object.
 type Link struct {
 Href string `json:"href"`
 Header map[string]string `json:"header,omitempty"`

@@ -19,7 +19,7 @@ var statusToColor = map[int][]byte{
 500: ColorBytes(Bold, BgRed),
 }
-// ColoredStatus addes colors for HTTP status
+// ColoredStatus adds colors for HTTP status
 func ColoredStatus(status int, s ...string) *ColoredValue {
 color, ok := statusToColor[status]
 if !ok {
@@ -43,7 +43,7 @@ var methodToColor = map[string][]byte{
 "HEAD": ColorBytes(FgBlue, Faint),
 }
-// ColoredMethod addes colors for HtTP methos on log
+// ColoredMethod adds colors for HTTP methods on log
 func ColoredMethod(method string) *ColoredValue {
 color, ok := methodToColor[method]
 if !ok {
@@ -72,7 +72,7 @@ var (
 wayTooLong = ColorBytes(BgMagenta)
 )
-// ColoredTime addes colors for time on log
+// ColoredTime adds colors for time on log
 func ColoredTime(duration time.Duration) *ColoredValue {
 for i, k := range durations {
 if duration < k {

@@ -274,7 +274,7 @@ func RenderDescriptionHTML(
 }
 // RenderEmoji for when we want to just process emoji and shortcodes
-// in various places it isn't already run through the normal markdown procesor
+// in various places it isn't already run through the normal markdown processor
 func RenderEmoji(
 content string,
 ) (string, error) {

@@ -74,7 +74,7 @@ func IsSummary(node ast.Node) bool {
 return ok
 }
-// TaskCheckBoxListItem is a block that repressents a list item of a markdown block with a checkbox
+// TaskCheckBoxListItem is a block that represents a list item of a markdown block with a checkbox
 type TaskCheckBoxListItem struct {
 *ast.ListItem
 IsChecked bool

@@ -25,7 +25,7 @@ func TestMarkdownStripper(t *testing.T) {
 This is [one](link) to paradise.
 This **is emphasized**.
-This: should coallesce.
+This: should coalesce.
 ` + "```" + `
 This is a code block.
@@ -44,7 +44,7 @@ A HIDDEN ` + "`" + `GHOST` + "`" + ` IN THIS LINE.
 "This",
 "is emphasized",
 ".",
-"This: should coallesce.",
+"This: should coalesce.",
 "Bullet 1",
 "Bullet 2",
 "A HIDDEN",

@@ -18,7 +18,7 @@ type GetCommentOptions struct {
 PageSize int
 }
-// Downloader downloads the site repo informations
+// Downloader downloads the site repo information
 type Downloader interface {
 SetContext(context.Context)
 GetRepoInfo() (*Repository, error)

@@ -5,7 +5,7 @@
 package base
-// Label defines a standard label informations
+// Label defines a standard label information
 type Label struct {
 Name string
 Color string

@@ -5,7 +5,7 @@
 package base
-// Uploader uploads all the informations of one repository
+// Uploader uploads all the information of one repository
 type Uploader interface {
 MaxBatchInsertSize(tp string) int
 CreateRepo(repo *Repository, opts MigrateOptions) error

@@ -61,7 +61,7 @@ func (f *GithubDownloaderV3Factory) GitServiceType() structs.GitServiceType {
 return structs.GithubService
 }
-// GithubDownloaderV3 implements a Downloader interface to get repository informations
+// GithubDownloaderV3 implements a Downloader interface to get repository information
 // from github via APIv3
 type GithubDownloaderV3 struct {
 base.NullDownloader

@@ -56,7 +56,7 @@ func (f *GitlabDownloaderFactory) GitServiceType() structs.GitServiceType {
 return structs.GitlabService
 }
-// GitlabDownloader implements a Downloader interface to get repository informations
+// GitlabDownloader implements a Downloader interface to get repository information
 // from gitlab via go-gitlab
 // - issueCount is incremented in GetIssues() to ensure PR and Issue numbers do not overlap,
 // because Gitlab has individual Issue and Pull Request numbers.

@@ -57,7 +57,7 @@ func (f *GogsDownloaderFactory) GitServiceType() structs.GitServiceType {
 return structs.GogsService
 }
-// GogsDownloader implements a Downloader interface to get repository informations
+// GogsDownloader implements a Downloader interface to get repository information
 // from gogs via API
 type GogsDownloader struct {
 base.NullDownloader

@@ -143,7 +143,7 @@ func (*NullNotifier) NotifyPushCommits(pusher *models.User, repo *models.Reposit
 func (*NullNotifier) NotifyCreateRef(doer *models.User, repo *models.Repository, refType, refFullName string) {
 }
-// NotifyDeleteRef notifies branch or tag deleteion to notifiers
+// NotifyDeleteRef notifies branch or tag deletion to notifiers
 func (*NullNotifier) NotifyDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string) {
 }

@@ -481,7 +481,7 @@ func TestParseCloseKeywords(t *testing.T) {
 {",$!", "", ""},
 {"1234", "", ""},
 } {
-// The patern only needs to match the part that precedes the reference.
+// The pattern only needs to match the part that precedes the reference.
 // getCrossReference() takes care of finding the reference itself.
 pat := makeKeywordsPat([]string{test.pattern})
 if test.expected == "" {

@@ -250,7 +250,7 @@ func TestUpdateIssuesCommit_AnotherRepoNoPermission(t *testing.T) {
 user := models.AssertExistsAndLoadBean(t, &models.User{ID: 10}).(*models.User)
 // Test that a push with close reference *can not* close issue
-// If the commiter doesn't have push rights in that repo
+// If the committer doesn't have push rights in that repo
 pushCommits := []*repository.PushCommit{
 {
 Sha1: "abcdef3",

@@ -69,7 +69,7 @@ func TestGetContents(t *testing.T) {
 assert.NoError(t, err)
 })
-t.Run("Get REAMDE.md contents with ref as empty string (should then use the repo's default branch) with GetContents()", func(t *testing.T) {
+t.Run("Get README.md contents with ref as empty string (should then use the repo's default branch) with GetContents()", func(t *testing.T) {
 fileContentResponse, err := GetContents(ctx.Repo.Repository, treePath, "", false)
 assert.EqualValues(t, expectedContentsResponse, fileContentResponse)
 assert.NoError(t, err)
@@ -132,7 +132,7 @@ func TestGetContentsOrListForFile(t *testing.T) {
 assert.NoError(t, err)
 })
-t.Run("Get REAMDE.md contents with ref as empty string (should then use the repo's default branch) with GetContentsOrList()", func(t *testing.T) {
+t.Run("Get README.md contents with ref as empty string (should then use the repo's default branch) with GetContentsOrList()", func(t *testing.T) {
 fileContentResponse, err := GetContentsOrList(ctx.Repo.Repository, treePath, "")
 assert.EqualValues(t, expectedContentsResponse, fileContentResponse)
 assert.NoError(t, err)

@@ -129,12 +129,12 @@ func ListUnadoptedRepositories(query string, opts *models.ListOptions) ([]string
 var err error
 globUser, err = glob.Compile(qsplit[0])
 if err != nil {
-log.Info("Invalid glob expresion '%s' (skipped): %v", qsplit[0], err)
+log.Info("Invalid glob expression '%s' (skipped): %v", qsplit[0], err)
 }
 if len(qsplit) > 1 {
 globRepo, err = glob.Compile(qsplit[1])
 if err != nil {
-log.Info("Invalid glob expresion '%s' (skipped): %v", qsplit[1], err)
+log.Info("Invalid glob expression '%s' (skipped): %v", qsplit[1], err)
 }
 }
 }

@@ -17,7 +17,7 @@ import (
 "code.gitea.io/gitea/modules/util"
 )
-// New creats a new secret
+// New creates a new secret
 func New() (string, error) {
 return NewWithLength(44)
 }

@@ -101,7 +101,7 @@ func IndexerGlobFromString(globstr string) []glob.Glob {
 expr = strings.TrimSpace(expr)
 if expr != "" {
 if g, err := glob.Compile(expr, '.', '/'); err != nil {
-log.Info("Invalid glob expresion '%s' (skipped): %v", expr, err)
+log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
 } else {
 extarr = append(extarr, g)
 }

@@ -15,7 +15,7 @@ import (
 )
 var (
-// SessionConfig difines Session settings
+// SessionConfig defines Session settings
 SessionConfig = struct {
 Provider string
 // Provider configuration, it's corresponding to provider.

@@ -71,7 +71,7 @@ type ObjectStorage interface {
 IterateObjects(func(path string, obj Object) error) error
 }
-// Copy copys a file from source ObjectStorage to dest ObjectStorage
+// Copy copies a file from source ObjectStorage to dest ObjectStorage
 func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, srcPath string) (int64, error) {
 f, err := srcStorage.Open(srcPath)
 if err != nil {

@@ -74,7 +74,7 @@ func runMigrateTask(t *models.Task) (err error) {
 return
 }
-// if repository is ready, then just finsih the task
+// if repository is ready, then just finish the task
 if t.Repo.Status == models.RepositoryReady {
 return nil
 }

@@ -21,7 +21,7 @@ const SvgMimeType = "image/svg+xml"
 var svgTagRegex = regexp.MustCompile(`(?si)\A\s*(?:(<!--.*?-->|<!DOCTYPE\s+svg([\s:]+.*?>|>))\s*)*<svg[\s>\/]`)
 var svgTagInXMLRegex = regexp.MustCompile(`(?si)\A<\?xml\b.*?\?>\s*(?:(<!--.*?-->|<!DOCTYPE\s+svg([\s:]+.*?>|>))\s*)*<svg[\s>\/]`)
-// SniffedType contains informations about a blobs type.
+// SniffedType contains information about a blobs type.
 type SniffedType struct {
 contentType string
 }

@@ -154,7 +154,7 @@ func StatDir(rootPath string, includeDir ...bool) ([]string, error) {
 return statDir(rootPath, "", isIncludeDir, false, false)
 }
-// FileURLToPath extracts the path informations from a file://... url.
+// FileURLToPath extracts the path information from a file://... url.
 func FileURLToPath(u *url.URL) (string, error) {
 if u.Scheme != "file" {
 return "", errors.New("URL scheme is not 'file': " + u.String())

@@ -33,7 +33,7 @@ func TestShellEscape(t *testing.T) {
 "~git/Gitea v1.13/gitea",
 `~git/"Gitea v1.13/gitea"`,
 }, {
-"Bangs are unforutunately not predictable so need to be singlequoted",
+"Bangs are unfortunately not predictable so need to be singlequoted",
 "C:/Program Files/Gitea!/gitea",
 `'C:/Program Files/Gitea!/gitea'`,
 }, {
@@ -41,7 +41,7 @@ func TestShellEscape(t *testing.T) {
 "/home/git/Gitea\n\nWHY-WOULD-YOU-DO-THIS\n\nGitea/gitea",
 "'/home/git/Gitea\n\nWHY-WOULD-YOU-DO-THIS\n\nGitea/gitea'",
 }, {
-"Similarly we should nicely handle mutiple single quotes if we have to single-quote",
+"Similarly we should nicely handle multiple single quotes if we have to single-quote",
 "'!''!'''!''!'!'",
 `\''!'\'\''!'\'\'\''!'\'\''!'\''!'\'`,
 }, {

@@ -92,7 +92,7 @@ func IsValidExternalURL(uri string) bool {
 return false
 }
-// TODO: Later it should be added to allow local network IP addreses
+// TODO: Later it should be added to allow local network IP addresses
 // only if allowed by special setting
 return true

@@ -24,12 +24,12 @@ func Test_IsValidURL(t *testing.T) {
 valid: false,
 },
 {
-description: "Loobpack IPv4 URL",
+description: "Loopback IPv4 URL",
 url: "http://127.0.1.1:5678/",
 valid: true,
 },
 {
-description: "Loobpack IPv6 URL",
+description: "Loopback IPv6 URL",
 url: "https://[::1]/",
 valid: true,
 },
@@ -61,7 +61,7 @@ func Test_IsValidExternalURL(t *testing.T) {
 valid: true,
 },
 {
-description: "Loobpack IPv4 URL",
+description: "Loopback IPv4 URL",
 url: "http://127.0.1.1:5678/",
 valid: false,
 },

@@ -12,7 +12,7 @@ import (
 var gitRefNameValidationTestCases = []validationTestCase{
 {
-description: "Referece contains only characters",
+description: "Reference name contains only characters",
 data: TestForm{
 BranchName: "test",
 },

@@ -526,7 +526,7 @@ func ListTrackedTimesByRepository(ctx *context.APIContext) {
 if opts.UserID == 0 {
 opts.UserID = ctx.User.ID
 } else {
-ctx.Error(http.StatusForbidden, "", fmt.Errorf("query user not allowed not enouth rights"))
+ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
 return
 }
 }

@@ -166,7 +166,7 @@ func DeleteAccessToken(ctx *context.APIContext) {
 case 1:
 tokenID = tokens[0].ID
 default:
-ctx.Error(http.StatusUnprocessableEntity, "DeleteAccessTokenByID", fmt.Errorf("multible matches for token name '%s'", token))
+ctx.Error(http.StatusUnprocessableEntity, "DeleteAccessTokenByID", fmt.Errorf("multiple matches for token name '%s'", token))
 return
 }
 }

@@ -205,7 +205,7 @@ func prepareUserInfo(ctx *context.Context) *models.User {
 return u
 }
-// EditUser show editting user page
+// EditUser show editing user page
 func EditUser(ctx *context.Context) {
 ctx.Data["Title"] = ctx.Tr("admin.users.edit_account")
 ctx.Data["PageIsAdmin"] = true
@@ -222,7 +222,7 @@ func EditUser(ctx *context.Context) {
 ctx.HTML(http.StatusOK, tplUserEdit)
 }
-// EditUserPost response for editting user
+// EditUserPost response for editing user
 func EditUserPost(ctx *context.Context) {
 form := web.GetForm(ctx).(*forms.AdminEditUserForm)
 ctx.Data["Title"] = ctx.Tr("admin.users.edit_account")

@@ -39,7 +39,7 @@ func Settings(ctx *context.Context) {
 ctx.HTML(http.StatusOK, tplSettingsOptions)
 }
-// SettingsPost response for settings change submited
+// SettingsPost response for settings change submitted
 func SettingsPost(ctx *context.Context) {
 form := web.GetForm(ctx).(*forms.UpdateOrgSettingForm)
 ctx.Data["Title"] = ctx.Tr("org.settings")
@@ -139,7 +139,7 @@ func SettingsAvatar(ctx *context.Context) {
 ctx.Redirect(ctx.Org.OrgLink + "/settings")
 }
-// SettingsDeleteAvatar response for delete avatar on setings page
+// SettingsDeleteAvatar response for delete avatar on settings page
 func SettingsDeleteAvatar(ctx *context.Context) {
 if err := ctx.Org.Organization.DeleteAvatar(); err != nil {
 ctx.Flash.Error(err.Error())

@@ -32,7 +32,7 @@ import (
 repo_service "code.gitea.io/gitea/services/repository"
 )
-// httpBase implmentation git smart HTTP protocol
+// httpBase implementation git smart HTTP protocol
 func httpBase(ctx *context.Context) (h *serviceHandler) {
 if setting.Repository.DisableHTTPGit {
 ctx.Resp.WriteHeader(http.StatusForbidden)

@@ -831,7 +831,7 @@ func NewIssueChooseTemplate(ctx *context.Context) {
 ctx.HTML(http.StatusOK, tplIssueChoose)
 }
-// ValidateRepoMetas check and returns repository's meta informations
+// ValidateRepoMetas check and returns repository's meta information
 func ValidateRepoMetas(ctx *context.Context, form forms.CreateIssueForm, isPull bool) ([]int64, []int64, int64, int64) {
 var (
 repo = ctx.Repo.Repository

@@ -751,7 +751,7 @@ func renderCode(ctx *context.Context) {
 ctx.HTML(http.StatusOK, tplRepoHome)
 }
-// RenderUserCards render a page show users according the input templaet
+// RenderUserCards render a page show users according the input template
 func RenderUserCards(ctx *context.Context, total int, getter func(opts models.ListOptions) ([]*models.User, error), tpl base.TplName) {
 page := ctx.QueryInt("page")
 if page <= 0 {

@@ -1473,7 +1473,7 @@ func ActivateEmail(ctx *context.Context) {
 ctx.Redirect(setting.AppSubURL + "/user/settings/account")
 }
-// ForgotPasswd render the forget pasword page
+// ForgotPasswd render the forget password page
 func ForgotPasswd(ctx *context.Context) {
 ctx.Data["Title"] = ctx.Tr("auth.forgot_password_title")

@@ -146,7 +146,7 @@ func Routes() *web.Route {
 routes.Get("/metrics", append(common, Metrics)...)
 }
-// Removed: toolbox.Toolboxer middleware will provide debug informations which seems unnecessary
+// Removed: toolbox.Toolboxer middleware will provide debug information which seems unnecessary
 common = append(common, context.Contexter())
 // Get user from session if logged in.

@@ -73,7 +73,7 @@ func (s *SSPI) Free() error {
 }
 // Verify uses SSPI (Windows implementation of SPNEGO) to authenticate the request.
-// If authentication is successful, returs the corresponding user object.
+// If authentication is successful, returns the corresponding user object.
 // If negotiation should continue or authentication fails, immediately returns a 401 HTTP
 // response code, as required by the SPNEGO protocol.
 func (s *SSPI) Verify(req *http.Request, w http.ResponseWriter, store DataStore, sess SessionStore) *models.User {

@@ -25,7 +25,7 @@ func DeleteNotPassedAssignee(issue *models.Issue, doer *models.User, assignees [
 }
 if !found {
-// This function also does comments and hooks, which is why we call it seperatly instead of directly removing the assignees here
+// This function also does comments and hooks, which is why we call it separately instead of directly removing the assignees here
 if _, _, err := ToggleAssignee(issue, doer, assignee.ID); err != nil {
 return err
 }

@@ -304,7 +304,7 @@ var Sender gomail.Sender
 // NewContext start mail queue service
 func NewContext() {
 // Need to check if mailQueue is nil because in during reinstall (user had installed
-// before but swithed install lock off), this function will be called again
+// before but switched install lock off), this function will be called again
 // while mail queue is already processing tasks, and produces a race condition.
 if setting.MailService == nil || mailQueue != nil {
 return

@@ -140,7 +140,7 @@ func TestRepository_AddWikiPage(t *testing.T) {
 wikiPath := NameToFilename(wikiName)
 entry, err := masterTree.GetTreeEntryByPath(wikiPath)
 assert.NoError(t, err)
-assert.Equal(t, wikiPath, entry.Name(), "%s not addded correctly", wikiName)
+assert.Equal(t, wikiPath, entry.Name(), "%s not added correctly", wikiName)
 })
 }

@@ -30873,7 +30873,7 @@ ol.ui.suffixed.list li:before,
 List
 ---------------*/
-/* Menu divider shouldnt apply */
+/* Menu divider shouldn't apply */
 .ui.menu .list .item:before {
 background: none !important;
@@ -31802,7 +31802,7 @@ Floated Menu / Item
 opacity: 1;
 }
-/* Icon Gylph */
+/* Icon Glyph */
 .ui.icon.menu i.icon:before {
 opacity: 1;

@@ -142,7 +142,7 @@ $.api = $.fn.api = function(parameters) {
 response = JSON.parse(response);
 }
 catch(e) {
-// isnt json string
+// isn't json string
 }
 }
 return response;
@@ -2220,7 +2220,7 @@ $.fn.dimmer = function(parameters) {
 event: {
 click: function(event) {
-module.verbose('Determining if event occured on dimmer', event);
+module.verbose('Determining if event occurred on dimmer', event);
 if( $dimmer.find(event.target).length === 0 || $(event.target).is(selector.content) ) {
 module.hide();
 event.stopImmediatePropagation();
@@ -3368,7 +3368,7 @@ $.fn.dropdown = function(parameters) {
 if(settings.onHide.call(element) !== false) {
 module.animate.hide(function() {
 module.remove.visible();
-// hidding search focus
+// hiding search focus
 if ( module.is.focusedOnSearch() && preventBlur !== true ) {
 $search.blur();
 }
@@ -11937,7 +11937,7 @@ $.fn.progress = function(parameters) {
 *
 * @param min A minimum value within multiple values
 * @param total A total amount of multiple values
-* @returns {number} A precison. Could be 1, 10, 100, ... 1e+10.
+* @returns {number} A precision. Could be 1, 10, 100, ... 1e+10.
 */
 derivePrecision: function(min, total) {
 var precisionPower = 0
@@ -12837,7 +12837,7 @@ $.fn.progress.settings = {
 nonNumeric : 'Progress value is non numeric',
 tooHigh : 'Value specified is above 100%',
 tooLow : 'Value specified is below 0%',
-sumExceedsTotal : 'Sum of multple values exceed total',
+sumExceedsTotal : 'Sum of multiple values exceed total',
 },
 regExp: {
@@ -18076,7 +18076,7 @@ $.fn.transition.settings = {
 // possible errors
 error: {
-noAnimation : 'Element is no longer attached to DOM. Unable to animate. Use silent setting to surpress this warning in production.',
+noAnimation : 'Element is no longer attached to DOM. Unable to animate. Use silent setting to suppress this warning in production.',
 repeated : 'That animation is already occurring, cancelling repeated animation',
 method : 'The method you called is not defined',
 support : 'This browser does not support CSS animations'

@@ -34,7 +34,7 @@ const commentMDEditors = {};
 // Silence fomantic's error logging when tabs are used without a target content element
 $.fn.tab.settings.silent = true;
-// Silence Vue's console advertisments in dev mode
+// Silence Vue's console advertisements in dev mode
 // To use the Vue browser extension, enable the devtools option temporarily
 Vue.config.productionTip = false;
 Vue.config.devtools = false;
@@ -461,7 +461,7 @@ function initCommentForm() {
 }
 // TODO: Which thing should be done for choosing review requests
-// to make choosed items be shown on time here?
+// to make chosen items be shown on time here?
 if (selector === 'select-reviewers-modify' || selector === 'select-assignees-modify') {
 return false;
 }

@@ -134,7 +134,7 @@ pre,
 code,
 kbd,
 samp {
-font-size: .9em; /* compensate for monospace fonts being usually slighty larger */
+font-size: .9em; /* compensate for monospace fonts being usually slightly larger */
 font-family: var(--fonts-monospace);
 }
@@ -748,7 +748,7 @@ a.ui.card:hover,
 box-shadow: none;
 }
-/* Overide semantic selector '.ui.menu:not(.vertical) .item > .button' */
+/* Override semantic selector '.ui.menu:not(.vertical) .item > .button' */
 /* This fixes the commit graph button on the commits page */
 .menu:not(.vertical) .item > .button.compact {

@@ -17,7 +17,7 @@
 .mono {
 font-family: var(--fonts-monospace) !important;
-font-size: .9em !important; /* compensate for monospace fonts being usually slighty larger */
+font-size: .9em !important; /* compensate for monospace fonts being usually slightly larger */
 }
 .bold { font-weight: 600 !important; }
