feat: add restart command #3680

Status: Open · wants to merge 7 commits into base branch `develop`

35 changes: 35 additions & 0 deletions cmd/restart.go
@@ -0,0 +1,35 @@
package cmd

import (
    "os"
    "os/signal"

    "github.com/spf13/afero"
    "github.com/spf13/cobra"
    "github.com/supabase/cli/internal/restart"
)

var (
    restartProjectId string
    restartAll       bool

    restartCmd = &cobra.Command{
        GroupID: groupLocalDev,
        Use:     "restart",
        Short:   "Restart all local Supabase containers",
        RunE: func(cmd *cobra.Command, args []string) error {
            ctx, _ := signal.NotifyContext(cmd.Context(), os.Interrupt)
            return restart.Run(ctx, restartProjectId, restartAll, afero.NewOsFs())
        },
    }
)

func init() {
    flags := restartCmd.Flags()
    flags.Bool("backup", true, "Backs up the current database before restarting.")
    flags.StringVar(&restartProjectId, "project-id", "", "Local project ID to restart.")
    cobra.CheckErr(flags.MarkHidden("backup"))
    flags.BoolVar(&restartAll, "all", false, "Restart all local Supabase instances from all projects across the machine.")
    restartCmd.MarkFlagsMutuallyExclusive("project-id", "all")
    rootCmd.AddCommand(restartCmd)
}
9 changes: 9 additions & 0 deletions docs/supabase/restart.md
@@ -0,0 +1,9 @@
## supabase-restart

Restarts the Supabase local development stack.

Requires `supabase/config.toml` to be created in your current working directory by running `supabase init`.

This command uses Docker's native restart functionality to restart running containers in place, instead of tearing them down and recreating them with separate `supabase stop` and `supabase start` runs. This is faster and preserves container state.

Use the `--all` flag to restart all local Supabase project instances on the machine.
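
For example, based on the flags registered in `cmd/restart.go` above: `supabase restart` restarts the current project's containers, `supabase restart --project-id <id>` targets a specific local project, and `supabase restart --all` restarts every local Supabase project on the machine.

The restart itself goes through the Docker Engine API. Below is a minimal standalone sketch of the same idea, assuming the Docker Go SDK is available; the `com.supabase.cli.project` label comes from this PR's tests, while the project value `myproject` and the program itself are illustrative only. The real implementation is `DockerRestartAll` further down.

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

func main() {
    ctx := context.Background()
    // Connect to the local Docker daemon using environment defaults.
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        log.Fatal(err)
    }
    // List containers belonging to one local project, matched by label
    // ("myproject" is an illustrative project ID).
    containers, err := cli.ContainerList(ctx, container.ListOptions{
        All:     true,
        Filters: filters.NewArgs(filters.Arg("label", "com.supabase.cli.project=myproject")),
    })
    if err != nil {
        log.Fatal(err)
    }
    // Restart each running container in place; nothing is removed or recreated.
    for _, c := range containers {
        if c.State == "running" {
            if err := cli.ContainerRestart(ctx, c.ID, container.StopOptions{}); err != nil {
                log.Fatal(err)
            }
            fmt.Println("restarted", c.ID)
        }
    }
}
```

The CLI command follows the same list-then-restart pattern, with the status-writer and config-loading plumbing added on top.
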
41 changes: 41 additions & 0 deletions internal/restart/restart.go
@@ -0,0 +1,41 @@
package restart

import (
    "context"
    "fmt"
    "io"
    "os"

    "github.com/spf13/afero"
    "github.com/supabase/cli/internal/utils"
    "github.com/supabase/cli/internal/utils/flags"
)

func Run(ctx context.Context, projectId string, all bool, fsys afero.Fs) error {
    var searchProjectIdFilter string
    if !all {
        // Sanity checks.
        if len(projectId) > 0 {
            utils.Config.ProjectId = projectId
        } else if err := flags.LoadConfig(fsys); err != nil {
            return err
        }
        searchProjectIdFilter = utils.Config.ProjectId
    }

    // Restart all services
    if err := utils.RunProgram(ctx, func(p utils.Program, ctx context.Context) error {
        w := utils.StatusWriter{Program: p}
        return restart(ctx, w, searchProjectIdFilter)
    }); err != nil {
        return err
    }

    fmt.Fprintf(os.Stderr, "Restarted %s local development setup.\n\n", utils.Aqua("supabase"))

    return nil
}

func restart(ctx context.Context, w io.Writer, projectId string) error {
    return utils.DockerRestartAll(ctx, w, projectId)
}
141 changes: 141 additions & 0 deletions internal/restart/restart_test.go
@@ -0,0 +1,141 @@
package restart

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "testing"

    "github.com/docker/docker/api/types/container"
    "github.com/h2non/gock"
    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/supabase/cli/internal/testing/apitest"
    "github.com/supabase/cli/internal/utils"
)

func TestRestartCommand(t *testing.T) {
    t.Run("restart containers", func(t *testing.T) {
        // Setup in-memory fs
        fsys := afero.NewMemMapFs()
        require.NoError(t, utils.WriteConfig(fsys, false))
        // Setup mock docker
        require.NoError(t, apitest.MockDocker(utils.Docker))
        defer gock.OffAll()
        gock.New(utils.Docker.DaemonHost()).
            Get("/v" + utils.Docker.ClientVersion() + "/containers/json").
            Reply(http.StatusOK).
            JSON([]container.Summary{})

        // Run test
        err := Run(context.Background(), "", false, fsys)
        // Check error
        assert.NoError(t, err)
        assert.Empty(t, apitest.ListUnmatchedRequests())
    })

    t.Run("restart all instances when --all flag is used", func(t *testing.T) {
        // Setup in-memory fs
        fsys := afero.NewMemMapFs()
        require.NoError(t, utils.WriteConfig(fsys, false))
        // Setup mock docker
        require.NoError(t, apitest.MockDocker(utils.Docker))
        defer gock.OffAll()

        projects := []string{"project1", "project2"}

        // Mock initial ContainerList for all containers
        gock.New(utils.Docker.DaemonHost()).
            Get("/v"+utils.Docker.ClientVersion()+"/containers/json").
            MatchParam("all", "true").
            Reply(http.StatusOK).
            JSON([]container.Summary{
                {ID: "container1", Labels: map[string]string{utils.CliProjectLabel: "project1"}},
                {ID: "container2", Labels: map[string]string{utils.CliProjectLabel: "project2"}},
            })

        // Mock restartOneProject for each project
        for _, projectId := range projects {
            // Mock ContainerList for each project
            gock.New(utils.Docker.DaemonHost()).
                Get("/v"+utils.Docker.ClientVersion()+"/containers/json").
                MatchParam("all", "1").
                MatchParam("filters", fmt.Sprintf(`{"label":{"com.supabase.cli.project=%s":true}}`, projectId)).
                Reply(http.StatusOK).
                JSON([]container.Summary{{ID: "container-" + projectId, State: "running"}})

            // Mock container restart
            gock.New(utils.Docker.DaemonHost()).
                Post("/v" + utils.Docker.ClientVersion() + "/containers/container-" + projectId + "/restart").
                Reply(http.StatusOK)
        }

        // Mock final ContainerList to verify all containers are restarted
        gock.New(utils.Docker.DaemonHost()).
            Get("/v"+utils.Docker.ClientVersion()+"/containers/json").
            MatchParam("all", "true").
            Reply(http.StatusOK).
            JSON([]container.Summary{})
        gock.New(utils.Docker.DaemonHost()).
            Get("/v" + utils.Docker.ClientVersion() + "/containers/json").
            Reply(http.StatusOK).
            JSON([]container.Summary{})

        // Run test
        err := Run(context.Background(), "", true, fsys)

        // Check error
        assert.NoError(t, err)
        assert.Empty(t, apitest.ListUnmatchedRequests())
    })

    t.Run("throws error on malformed config", func(t *testing.T) {
        // Setup in-memory fs
        fsys := afero.NewMemMapFs()
        require.NoError(t, afero.WriteFile(fsys, utils.ConfigPath, []byte("malformed"), 0644))
        // Run test
        err := Run(context.Background(), "", false, fsys)
        // Check error
        assert.ErrorContains(t, err, "toml: expected = after a key, but the document ends there")
    })

    t.Run("throws error on restart failure", func(t *testing.T) {
        // Setup in-memory fs
        fsys := afero.NewMemMapFs()
        require.NoError(t, utils.WriteConfig(fsys, false))
        // Setup mock docker
        require.NoError(t, apitest.MockDocker(utils.Docker))
        defer gock.OffAll()
        gock.New(utils.Docker.DaemonHost()).
            Get("/v" + utils.Docker.ClientVersion() + "/containers/json").
            Reply(http.StatusServiceUnavailable)
        // Run test
        err := Run(context.Background(), "test", false, afero.NewReadOnlyFs(fsys))
        // Check error
        assert.ErrorContains(t, err, "request returned 503 Service Unavailable for API route and version")
        assert.Empty(t, apitest.ListUnmatchedRequests())
    })
}

func TestRestartServices(t *testing.T) {
    t.Run("restart all services", func(t *testing.T) {
        containers := []container.Summary{{ID: "c1", State: "running"}, {ID: "c2"}}
        // Setup mock docker
        require.NoError(t, apitest.MockDocker(utils.Docker))
        defer gock.OffAll()
        gock.New(utils.Docker.DaemonHost()).
            Get("/v" + utils.Docker.ClientVersion() + "/containers/json").
            Reply(http.StatusOK).
            JSON(containers)
        gock.New(utils.Docker.DaemonHost()).
            Post("/v" + utils.Docker.ClientVersion() + "/containers/" + containers[0].ID + "/restart").
            Reply(http.StatusOK)
        // Run test
        err := restart(context.Background(), io.Discard, utils.Config.ProjectId)
        // Check error
        assert.NoError(t, err)
        assert.Empty(t, apitest.ListUnmatchedRequests())
    })
}
8 changes: 8 additions & 0 deletions internal/testing/apitest/docker.go
@@ -82,6 +82,14 @@ func MockDockerStop(docker *client.Client) {
        JSON(network.PruneReport{})
}

// Ref: internal/utils/docker.go::DockerRestartAll
func MockDockerRestart(docker *client.Client) {
    gock.New(docker.DaemonHost()).
        Get("/v" + docker.ClientVersion() + "/containers/json").
        Reply(http.StatusOK).
        JSON([]container.Summary{})
}

// Ref: internal/utils/docker.go::DockerRunOnce
func setupDockerLogs(docker *client.Client, containerID, stdout string, exitCode int) error {
    var body bytes.Buffer
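
`MockDockerRestart` mirrors the interceptor that `restart_test.go` sets up inline for its simplest case. As a hedged sketch (not part of this diff), a hypothetical extra test file, e.g. `internal/restart/restart_mock_test.go`, could lean on the helper instead:

```go
package restart

import (
    "context"
    "testing"

    "github.com/h2non/gock"
    "github.com/spf13/afero"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/supabase/cli/internal/testing/apitest"
    "github.com/supabase/cli/internal/utils"
)

// Illustrative only: exercises Run via the new MockDockerRestart helper.
func TestRestartWithMockHelper(t *testing.T) {
    // Setup in-memory fs with a default config.
    fsys := afero.NewMemMapFs()
    require.NoError(t, utils.WriteConfig(fsys, false))
    // Setup mock docker and stub GET /containers/json with an empty list,
    // matching the listing call made by DockerRestartAll.
    require.NoError(t, apitest.MockDocker(utils.Docker))
    defer gock.OffAll()
    apitest.MockDockerRestart(utils.Docker)
    // Run test
    err := Run(context.Background(), "", false, fsys)
    // Check error
    assert.NoError(t, err)
    assert.Empty(t, apitest.ListUnmatchedRequests())
}
```

Whether the existing tests should route through the helper is a reviewer call; the inline gock setup in `restart_test.go` is equivalent.
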
30 changes: 30 additions & 0 deletions internal/utils/docker.go
@@ -147,6 +147,36 @@ func DockerRemoveAll(ctx context.Context, w io.Writer, projectId string) error {
    return nil
}

func DockerRestartAll(ctx context.Context, w io.Writer, projectId string) error {
    fmt.Fprintln(w, "Restarting containers...")
    args := CliProjectFilter(projectId)
    containers, err := Docker.ContainerList(ctx, container.ListOptions{
        All:     true,
        Filters: args,
    })
    if err != nil {
        return errors.Errorf("failed to list containers: %w", err)
    }
    // Restart only the containers that are currently running
    var ids []string
    for _, c := range containers {
        if c.State == "running" {
            ids = append(ids, c.ID)
        }
    }
    result := WaitAll(ids, func(id string) error {
        if err := Docker.ContainerRestart(ctx, id, container.StopOptions{}); err != nil {
            return errors.Errorf("failed to restart container: %w", err)
        }
        return nil
    })
    if err := errors.Join(result...); err != nil {
        return err
    }

    return nil
}

func CliProjectFilter(projectId string) filters.Args {
    if len(projectId) == 0 {
        return filters.NewArgs(